mariadb/storage/myisam/ha_myisam.cc

/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
#endif
#define MYSQL_SERVER 1
#include "sql_priv.h"
#include "probes_mysql.h"
#include "key.h" // key_copy
#include "sql_plugin.h"
#include <m_ctype.h>
#include <my_bit.h>
#include <myisampack.h>
#include "ha_myisam.h"
#include <stdarg.h>
#include "myisamdef.h"
#include "rt_index.h"
#include "sql_table.h" // tablename_to_filename
#include "sql_class.h" // THD
ulonglong myisam_recover_options;
static ulong opt_myisam_block_size;
/* bits in myisam_recover_options */
const char *myisam_recover_names[] =
{ "DEFAULT", "BACKUP", "FORCE", "QUICK", "OFF", NullS};
TYPELIB myisam_recover_typelib= {array_elements(myisam_recover_names)-1,"",
myisam_recover_names, NULL};
const char *myisam_stats_method_names[] = {"nulls_unequal", "nulls_equal",
"nulls_ignored", NullS};
TYPELIB myisam_stats_method_typelib= {
array_elements(myisam_stats_method_names) - 1, "",
myisam_stats_method_names, NULL};
static MYSQL_SYSVAR_ULONG(block_size, opt_myisam_block_size,
PLUGIN_VAR_NOSYSVAR | PLUGIN_VAR_RQCMDARG,
"Block size to be used for MyISAM index pages", NULL, NULL,
MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH,
MI_MIN_KEY_BLOCK_LENGTH);
static MYSQL_SYSVAR_ULONG(data_pointer_size, myisam_data_pointer_size,
PLUGIN_VAR_RQCMDARG, "Default pointer size to be used for MyISAM tables",
NULL, NULL, 6, 2, 7, 1);
#define MB (1024*1024)
static MYSQL_SYSVAR_ULONGLONG(max_sort_file_size, myisam_max_temp_length,
PLUGIN_VAR_RQCMDARG, "Don't use the fast sort index method to created "
"index if the temporary file would get bigger than this", NULL, NULL,
LONG_MAX/MB*MB, 0, MAX_FILE_SIZE, MB);
static MYSQL_SYSVAR_SET(recover_options, myisam_recover_options,
PLUGIN_VAR_OPCMDARG|PLUGIN_VAR_READONLY,
"Syntax: myisam-recover-options[=option[,option...]], where option can be "
"DEFAULT, BACKUP, FORCE, QUICK, or OFF",
NULL, NULL, 0, &myisam_recover_typelib);
static MYSQL_THDVAR_ULONG(repair_threads, PLUGIN_VAR_RQCMDARG,
"If larger than 1, when repairing a MyISAM table all indexes will be "
"created in parallel, with one thread per index. The value of 1 "
"disables parallel repair", NULL, NULL,
1, 1, ULONG_MAX, 1);
static MYSQL_THDVAR_ULONG(sort_buffer_size, PLUGIN_VAR_RQCMDARG,
"The buffer that is allocated when sorting the index when doing "
"a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE", NULL, NULL,
8192*1024, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD), ULONG_MAX, 1);
static MYSQL_SYSVAR_BOOL(use_mmap, opt_myisam_use_mmap, PLUGIN_VAR_NOCMDARG,
"Use memory mapping for reading and writing MyISAM tables", NULL, NULL, FALSE);
static MYSQL_SYSVAR_ULONGLONG(mmap_size, myisam_mmap_size,
PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_READONLY, "Restricts the total memory "
"used for memory mapping of MySQL tables", NULL, NULL,
SIZE_T_MAX, MEMMAP_EXTRA_MARGIN, SIZE_T_MAX, 1);
static MYSQL_THDVAR_ENUM(stats_method, PLUGIN_VAR_RQCMDARG,
"Specifies how MyISAM index statistics collection code should "
"treat NULLs. Possible values of name are NULLS_UNEQUAL (default "
"behavior for 4.1 and later), NULLS_EQUAL (emulate 4.0 behavior), "
"and NULLS_IGNORED", NULL, NULL,
MI_STATS_METHOD_NULLS_NOT_EQUAL, &myisam_stats_method_typelib);
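/*
  A minimal sketch of how the per-session variables declared above are read,
  assuming the standard plugin THDVAR() accessor (the helper name is purely
  illustrative; the real accesses happen in the repair code further down in
  this file):
*/
static inline ulong example_repair_threads(THD *thd)
{
  /* Current value of the session variable @@myisam_repair_threads */
  return THDVAR(thd, repair_threads);
}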
#ifndef DBUG_OFF
/**
Causes the thread to wait in a spin lock for a query kill signal.
This function is used by the test framework to identify race conditions.
The signal is caught and ignored and the thread is not killed.
*/
static void debug_wait_for_kill(const char *info)
{
DBUG_ENTER("debug_wait_for_kill");
const char *prev_info;
THD *thd;
thd= current_thd;
prev_info= thd_proc_info(thd, info);
while(!thd->killed)
my_sleep(1000);
DBUG_PRINT("info", ("Exit debug_wait_for_kill"));
thd_proc_info(thd, prev_info);
DBUG_VOID_RETURN;
}
#endif
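#ifndef DBUG_OFF
/*
  A sketch of how debug_wait_for_kill() is typically reached: through a
  DBUG_EXECUTE_IF() sync point that only fires when the corresponding debug
  keyword is enabled. The wrapper and keyword below are illustrative,
  following the pattern used from the index-repair paths of this engine.
*/
static void example_debug_sync_point()
{
  DBUG_EXECUTE_IF("wait_in_enable_indexes",
                  debug_wait_for_kill("wait_in_enable_indexes"););
}
#endif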
/*****************************************************************************
** MyISAM tables
*****************************************************************************/
static handler *myisam_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(hton, table);
}
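/*
  A minimal sketch of how this factory is wired into the engine handlerton
  during plugin initialization. The function name is illustrative and the
  exact fields set by the real init function (which appears later in this
  file) may differ; this only shows registration of the create hook.
*/
static int example_myisam_init_sketch(void *p)
{
  handlerton *hton= (handlerton *) p;
  hton->db_type= DB_TYPE_MYISAM;
  hton->create= myisam_create_handler;          /* factory defined above */
  hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES;
  return 0;
}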
// collect errors printed by mi_check routines
static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
const char *fmt, va_list args)
{
THD* thd = (THD*)param->thd;
Protocol *protocol= thd->protocol;
size_t length, msg_length;
char msgbuf[MI_MAX_MSG_BUF];
char name[NAME_LEN*2+2];
msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia
DBUG_PRINT(msg_type,("message: %s",msgbuf));
if (!thd->vio_ok())
{
sql_print_error("%s", msgbuf);
return;
}
if (param->testflag & (T_CREATE_MISSING_KEYS | T_SAFE_REPAIR |
T_AUTO_REPAIR))
{
my_message(ER_NOT_KEYFILE,msgbuf,MYF(MY_WME));
return;
}
length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) -
name);
/*
TODO: switch from protocol to push_warning here. The main reason we haven't
done it yet is parallel repair, due to the following trace:
mi_check_print_msg/push_warning/sql_alloc/my_pthread_getspecific_ptr.
Also we likely need to lock a mutex here (in both cases, with protocol and
with push_warning).
*/
if (param->need_print_msg_lock)
mysql_mutex_lock(&param->print_msg_mutex);
protocol->prepare_for_resend();
protocol->store(name, length, system_charset_info);
protocol->store(param->op_name, system_charset_info);
protocol->store(msg_type, system_charset_info);
protocol->store(msgbuf, msg_length, system_charset_info);
if (protocol->write())
sql_print_error("Failed on my_net_write, writing to stderr instead: %s\n",
msgbuf);
if (param->need_print_msg_lock)
mysql_mutex_unlock(&param->print_msg_mutex);
return;
}
/*
Convert TABLE object to MyISAM key and column definition
SYNOPSIS
table2myisam()
table_arg in TABLE object.
keydef_out out MyISAM key definition.
recinfo_out out MyISAM column definition.
records_out out Number of MyISAM column definitions (recinfo entries) produced.
DESCRIPTION
This function will allocate and initialize MyISAM key and column
definition for further use in mi_create or for a check for underlying
table conformance in merge engine.
The caller needs to free *recinfo_out after use. Since *recinfo_out
and *keydef_out are allocated with a my_multi_malloc, *keydef_out
is freed automatically when *recinfo_out is freed.
RETURN VALUE
0 OK
!0 error code
*/
int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out,
MI_COLUMNDEF **recinfo_out, uint *records_out)
{
uint i, j, recpos, minpos, fieldpos, temp_length, length;
enum ha_base_keytype type= HA_KEYTYPE_BINARY;
uchar *record;
KEY *pos;
MI_KEYDEF *keydef;
MI_COLUMNDEF *recinfo, *recinfo_pos;
HA_KEYSEG *keyseg;
TABLE_SHARE *share= table_arg->s;
uint options= share->db_options_in_use;
DBUG_ENTER("table2myisam");
if (!(my_multi_malloc(MYF(MY_WME),
recinfo_out, (share->fields * 2 + 2) * sizeof(MI_COLUMNDEF),
keydef_out, share->keys * sizeof(MI_KEYDEF),
&keyseg,
(share->key_parts + share->keys) * sizeof(HA_KEYSEG),
NullS)))
DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
keydef= *keydef_out;
recinfo= *recinfo_out;
pos= table_arg->key_info;
for (i= 0; i < share->keys; i++, pos++)
{
keydef[i].flag= ((uint16) pos->flags & (HA_NOSAME | HA_FULLTEXT | HA_SPATIAL));
keydef[i].key_alg= pos->algorithm == HA_KEY_ALG_UNDEF ?
(pos->flags & HA_SPATIAL ? HA_KEY_ALG_RTREE : HA_KEY_ALG_BTREE) :
pos->algorithm;
keydef[i].block_length= pos->block_size;
keydef[i].seg= keyseg;
keydef[i].keysegs= pos->key_parts;
for (j= 0; j < pos->key_parts; j++)
{
Field *field= pos->key_part[j].field;
type= field->key_type();
keydef[i].seg[j].flag= pos->key_part[j].key_part_flag;
if (options & HA_OPTION_PACK_KEYS ||
(pos->flags & (HA_PACK_KEY | HA_BINARY_PACK_KEY |
HA_SPACE_PACK_USED)))
{
if (pos->key_part[j].length > 8 &&
(type == HA_KEYTYPE_TEXT ||
type == HA_KEYTYPE_NUM ||
(type == HA_KEYTYPE_BINARY && !field->zero_pack())))
{
/* No blobs here */
if (j == 0)
keydef[i].flag|= HA_PACK_KEY;
if (!(field->flags & ZEROFILL_FLAG) &&
(field->type() == MYSQL_TYPE_STRING ||
field->type() == MYSQL_TYPE_VAR_STRING ||
((int) (pos->key_part[j].length - field->decimals())) >= 4))
keydef[i].seg[j].flag|= HA_SPACE_PACK;
}
else if (j == 0 && (!(pos->flags & HA_NOSAME) || pos->key_length > 16))
keydef[i].flag|= HA_BINARY_PACK_KEY;
}
keydef[i].seg[j].type= (int) type;
keydef[i].seg[j].start= pos->key_part[j].offset;
keydef[i].seg[j].length= pos->key_part[j].length;
keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end=
keydef[i].seg[j].bit_length= 0;
keydef[i].seg[j].bit_pos= 0;
keydef[i].seg[j].language= field->charset_for_protocol()->number;
if (field->null_ptr)
{
keydef[i].seg[j].null_bit= field->null_bit;
keydef[i].seg[j].null_pos= (uint) (field->null_ptr-
(uchar*) table_arg->record[0]);
}
else
{
keydef[i].seg[j].null_bit= 0;
keydef[i].seg[j].null_pos= 0;
}
if (field->type() == MYSQL_TYPE_BLOB ||
field->type() == MYSQL_TYPE_GEOMETRY)
{
keydef[i].seg[j].flag|= HA_BLOB_PART;
/* save number of bytes used to pack length */
keydef[i].seg[j].bit_start= (uint) (field->pack_length() -
share->blob_ptr_size);
}
else if (field->type() == MYSQL_TYPE_BIT)
{
keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len;
keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs;
keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr -
(uchar*) table_arg->record[0]);
}
}
keyseg+= pos->key_parts;
}
if (table_arg->found_next_number_field)
keydef[share->next_number_index].flag|= HA_AUTO_KEY;
record= table_arg->record[0];
recpos= 0;
recinfo_pos= recinfo;
while (recpos < (uint) share->reclength)
{
Field **field, *found= 0;
minpos= share->reclength;
length= 0;
for (field= table_arg->field; *field; field++)
{
if ((fieldpos= (*field)->offset(record)) >= recpos &&
fieldpos <= minpos)
{
/* skip null fields */
if (!(temp_length= (*field)->pack_length_in_rec()))
continue; /* Skip null-fields */
if (! found || fieldpos < minpos ||
(fieldpos == minpos && temp_length < length))
{
minpos= fieldpos;
found= *field;
length= temp_length;
}
}
}
DBUG_PRINT("loop", ("found: 0x%lx recpos: %d minpos: %d length: %d",
(long) found, recpos, minpos, length));
if (recpos != minpos)
{ // Reserved space (Null bits?)
bzero((char*) recinfo_pos, sizeof(*recinfo_pos));
recinfo_pos->type= (int) FIELD_NORMAL;
recinfo_pos++->length= (uint16) (minpos - recpos);
}
if (!found)
break;
if (found->flags & BLOB_FLAG)
recinfo_pos->type= (int) FIELD_BLOB;
else if (found->type() == MYSQL_TYPE_VARCHAR)
recinfo_pos->type= FIELD_VARCHAR;
else if (!(options & HA_OPTION_PACK_RECORD))
recinfo_pos->type= (int) FIELD_NORMAL;
else if (found->zero_pack())
recinfo_pos->type= (int) FIELD_SKIP_ZERO;
else
recinfo_pos->type= (int) ((length <= 3 ||
(found->flags & ZEROFILL_FLAG)) ?
FIELD_NORMAL :
found->type() == MYSQL_TYPE_STRING ||
found->type() == MYSQL_TYPE_VAR_STRING ?
FIELD_SKIP_ENDSPACE :
FIELD_SKIP_PRESPACE);
if (found->null_ptr)
{
recinfo_pos->null_bit= found->null_bit;
recinfo_pos->null_pos= (uint) (found->null_ptr -
(uchar*) table_arg->record[0]);
}
else
{
recinfo_pos->null_bit= 0;
recinfo_pos->null_pos= 0;
}
(recinfo_pos++)->length= (uint16) length;
recpos= minpos + length;
DBUG_PRINT("loop", ("length: %d type: %d",
recinfo_pos[-1].length,recinfo_pos[-1].type));
}
*records_out= (uint) (recinfo_pos - recinfo);
DBUG_RETURN(0);
}
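/*
  A minimal usage sketch for table2myisam(). The helper name is illustrative;
  it mirrors the caller pattern described in the function comment above and
  assumes the single-argument my_free() of this server version: one free on
  the recinfo pointer releases the whole my_multi_malloc() block, including
  the key definitions.
*/
static int example_table2myisam_usage(TABLE *table_arg)
{
  MI_KEYDEF *keydef;
  MI_COLUMNDEF *recinfo;
  uint records;
  int error= table2myisam(table_arg, &keydef, &recinfo, &records);
  if (!error)
    my_free(recinfo);                           /* frees keydef as well */
  return error;
}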
/*
Check for underlying table conformance
SYNOPSIS
check_definition()
t1_keyinfo in First table key definition
t1_recinfo in First table record definition
t1_keys in Number of keys in first table
t1_recs in Number of records in first table
t2_keyinfo in Second table key definition
t2_recinfo in Second table record definition
t2_keys in Number of keys in second table
t2_recs in Number of records in second table
strict in Strict check switch
table in handle to the table object
DESCRIPTION
This function compares two MyISAM definitions. By intention it was done
to compare merge table definition against underlying table definition.
It may also be used to compare dot-frm and MYI definitions of MyISAM
table as well to compare different MyISAM table definitions.
For a merge table it is not required that the number of keys in the merge
table exactly matches the number of keys in the underlying table. When
calling this function for an underlying table conformance check, the
'strict' flag must be set to false and the converted merge definition must
be passed as t1_*. Otherwise the 'strict' flag must be set to true and it
is not required to pass the converted dot-frm definition as t1_*.
For compatibility reasons we relax some checks, specifically:
- 4.0 (and earlier versions) always set key_alg to 0.
- 4.0 (and earlier versions) have the same language for all keysegs.
RETURN VALUE
0 - Equal definitions.
1 - Different definitions.
TODO
- compare FULLTEXT keys;
- compare SPATIAL keys;
- compare FIELD_SKIP_ZERO which is converted to FIELD_NORMAL correctly
(should be correctly detected in table2myisam).
*/
int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo,
uint t1_keys, uint t1_recs,
MI_KEYDEF *t2_keyinfo, MI_COLUMNDEF *t2_recinfo,
uint t2_keys, uint t2_recs, bool strict, TABLE *table_arg)
{
uint i, j;
DBUG_ENTER("check_definition");
my_bool mysql_40_compat= table_arg && table_arg->s->frm_version < FRM_VER_TRUE_VARCHAR;
if ((strict ? t1_keys != t2_keys : t1_keys > t2_keys))
{
DBUG_PRINT("error", ("Number of keys differs: t1_keys=%u, t2_keys=%u",
t1_keys, t2_keys));
DBUG_RETURN(1);
}
if (t1_recs != t2_recs)
{
DBUG_PRINT("error", ("Number of recs differs: t1_recs=%u, t2_recs=%u",
t1_recs, t2_recs));
DBUG_RETURN(1);
}
for (i= 0; i < t1_keys; i++)
{
HA_KEYSEG *t1_keysegs= t1_keyinfo[i].seg;
HA_KEYSEG *t2_keysegs= t2_keyinfo[i].seg;
if (t1_keyinfo[i].flag & HA_FULLTEXT && t2_keyinfo[i].flag & HA_FULLTEXT)
continue;
else if (t1_keyinfo[i].flag & HA_FULLTEXT ||
t2_keyinfo[i].flag & HA_FULLTEXT)
{
DBUG_PRINT("error", ("Key %d has different definition", i));
DBUG_PRINT("error", ("t1_fulltext= %d, t2_fulltext=%d",
test(t1_keyinfo[i].flag & HA_FULLTEXT),
test(t2_keyinfo[i].flag & HA_FULLTEXT)));
DBUG_RETURN(1);
}
if (t1_keyinfo[i].flag & HA_SPATIAL && t2_keyinfo[i].flag & HA_SPATIAL)
continue;
else if (t1_keyinfo[i].flag & HA_SPATIAL ||
t2_keyinfo[i].flag & HA_SPATIAL)
{
DBUG_PRINT("error", ("Key %d has different definition", i));
DBUG_PRINT("error", ("t1_spatial= %d, t2_spatial=%d",
test(t1_keyinfo[i].flag & HA_SPATIAL),
test(t2_keyinfo[i].flag & HA_SPATIAL)));
DBUG_RETURN(1);
}
if ((!mysql_40_compat &&
t1_keyinfo[i].key_alg != t2_keyinfo[i].key_alg) ||
t1_keyinfo[i].keysegs != t2_keyinfo[i].keysegs)
{
DBUG_PRINT("error", ("Key %d has different definition", i));
DBUG_PRINT("error", ("t1_keysegs=%d, t1_key_alg=%d",
t1_keyinfo[i].keysegs, t1_keyinfo[i].key_alg));
DBUG_PRINT("error", ("t2_keysegs=%d, t2_key_alg=%d",
t2_keyinfo[i].keysegs, t2_keyinfo[i].key_alg));
DBUG_RETURN(1);
}
for (j= t1_keyinfo[i].keysegs; j--;)
{
uint8 t1_keysegs_j__type= t1_keysegs[j].type;
/*
Table migration from 4.1 to 5.1. In 5.1 a *TEXT key part is
always HA_KEYTYPE_VARTEXT2. In 4.1 we had only the equivalent of
HA_KEYTYPE_VARTEXT1. Since we treat both the same on MyISAM
level, we can ignore a mismatch between these types.
*/
if ((t1_keysegs[j].flag & HA_BLOB_PART) &&
(t2_keysegs[j].flag & HA_BLOB_PART))
{
if ((t1_keysegs_j__type == HA_KEYTYPE_VARTEXT2) &&
(t2_keysegs[j].type == HA_KEYTYPE_VARTEXT1))
t1_keysegs_j__type= HA_KEYTYPE_VARTEXT1; /* purecov: tested */
else if ((t1_keysegs_j__type == HA_KEYTYPE_VARBINARY2) &&
(t2_keysegs[j].type == HA_KEYTYPE_VARBINARY1))
t1_keysegs_j__type= HA_KEYTYPE_VARBINARY1; /* purecov: inspected */
}
if ((!mysql_40_compat &&
t1_keysegs[j].language != t2_keysegs[j].language) ||
t1_keysegs_j__type != t2_keysegs[j].type ||
t1_keysegs[j].null_bit != t2_keysegs[j].null_bit ||
t1_keysegs[j].length != t2_keysegs[j].length)
{
DBUG_PRINT("error", ("Key segment %d (key %d) has different "
"definition", j, i));
DBUG_PRINT("error", ("t1_type=%d, t1_language=%d, t1_null_bit=%d, "
"t1_length=%d",
t1_keysegs[j].type, t1_keysegs[j].language,
t1_keysegs[j].null_bit, t1_keysegs[j].length));
DBUG_PRINT("error", ("t2_type=%d, t2_language=%d, t2_null_bit=%d, "
"t2_length=%d",
t2_keysegs[j].type, t2_keysegs[j].language,
t2_keysegs[j].null_bit, t2_keysegs[j].length));
DBUG_RETURN(1);
}
}
}
for (i= 0; i < t1_recs; i++)
{
MI_COLUMNDEF *t1_rec= &t1_recinfo[i];
MI_COLUMNDEF *t2_rec= &t2_recinfo[i];
/*
FIELD_SKIP_ZERO can be changed to FIELD_NORMAL in mi_create,
see NOTE1 in mi_create.c
*/
if ((t1_rec->type != t2_rec->type &&
!(t1_rec->type == (int) FIELD_SKIP_ZERO &&
t1_rec->length == 1 &&
t2_rec->type == (int) FIELD_NORMAL)) ||
t1_rec->length != t2_rec->length ||
t1_rec->null_bit != t2_rec->null_bit)
{
DBUG_PRINT("error", ("Field %d has different definition", i));
DBUG_PRINT("error", ("t1_type=%d, t1_length=%d, t1_null_bit=%d",
t1_rec->type, t1_rec->length, t1_rec->null_bit));
DBUG_PRINT("error", ("t2_type=%d, t2_length=%d, t2_null_bit=%d",
t2_rec->type, t2_rec->length, t2_rec->null_bit));
DBUG_RETURN(1);
}
}
DBUG_RETURN(0);
}
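/*
  A sketch of the strict conformance check this function supports: convert
  the dot-frm definition with table2myisam() and compare it against the
  definition stored in the open MYI file. The helper name is illustrative
  and the MI_INFO/MYISAM_SHARE member accesses are assumed to follow the
  layout used elsewhere in this engine.
*/
static bool example_definition_matches(TABLE *table_arg, MI_INFO *file)
{
  MI_KEYDEF *keyinfo;
  MI_COLUMNDEF *recinfo;
  uint recs;
  if (table2myisam(table_arg, &keyinfo, &recinfo, &recs))
    return false;                               /* conversion failed */
  bool equal= !check_definition(keyinfo, recinfo, table_arg->s->keys, recs,
                                file->s->keyinfo, file->s->rec,
                                file->s->base.keys, file->s->base.fields,
                                TRUE, table_arg);
  my_free(recinfo);                             /* also frees keyinfo */
  return equal;
}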
2000-07-31 21:29:14 +02:00
extern "C" {
volatile int *killed_ptr(MI_CHECK *param)
{
/* In theory an unsafe conversion, but should be OK for now */
return (int*) &(((THD *)(param->thd))->killed);
}
void mi_check_print_error(MI_CHECK *param, const char *fmt,...)
{
param->error_printed|=1;
param->out_flag|= O_DATA_LOST;
va_list args;
va_start(args, fmt);
mi_check_print_msg(param, "error", fmt, args);
va_end(args);
}
void mi_check_print_info(MI_CHECK *param, const char *fmt,...)
{
va_list args;
va_start(args, fmt);
mi_check_print_msg(param, "info", fmt, args);
va_end(args);
}
void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
{
param->warning_printed=1;
param->out_flag|= O_DATA_LOST;
va_list args;
va_start(args, fmt);
mi_check_print_msg(param, "warning", fmt, args);
va_end(args);
}
/**
Report the list of threads (and queries) accessing a table, the thread_id
of the thread that detected the corruption, the source file name and line
number where the corruption was detected, and optional extra information
(string).
This function is intended to be used when table corruption is detected.
@param[in] file MI_INFO object.
@param[in] message Optional error message.
@param[in] sfile Name of source file.
@param[in] sline Line number in source file.
@return void
*/
void _mi_report_crashed(MI_INFO *file, const char *message,
const char *sfile, uint sline)
{
THD *cur_thd;
LIST *element;
char buf[1024];
mysql_mutex_lock(&file->s->intern_lock);
if ((cur_thd= (THD*) file->in_use.data))
sql_print_error("Got an error from thread_id=%lu, %s:%d", cur_thd->thread_id,
sfile, sline);
else
sql_print_error("Got an error from unknown thread, %s:%d", sfile, sline);
if (message)
sql_print_error("%s", message);
for (element= file->s->in_use; element; element= list_rest(element))
{
THD *thd= (THD*) element->data;
sql_print_error("%s", thd ? thd_security_context(thd, buf, sizeof(buf), 0)
: "Unknown thread accessing table");
}
mysql_mutex_unlock(&file->s->intern_lock);
}
}
ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg)
:handler(hton, table_arg), file(0),
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS |
HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS |
HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_REPAIR),
can_enable_indexes(1)
{}
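/*
  Create a second handler instance for the same open table. The clone
  shares the MI_INFO state pointer with the original handler, so both
  instances see the same live row count and table status.
*/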
handler *ha_myisam::clone(const char *name, MEM_ROOT *mem_root)
{
ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(name,
mem_root));
if (new_handler)
new_handler->file->state= file->state;
return new_handler;
}
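/* File extensions used by MyISAM: .MYI holds the indexes, .MYD the data */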
static const char *ha_myisam_exts[] = {
".MYI",
".MYD",
NullS
};
const char **ha_myisam::bas_ext() const
{
return ha_myisam_exts;
}
const char *ha_myisam::index_type(uint key_number)
{
return ((table->key_info[key_number].flags & HA_FULLTEXT) ?
"FULLTEXT" :
(table->key_info[key_number].flags & HA_SPATIAL) ?
"SPATIAL" :
(table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
"RTREE" :
"BTREE");
}
/* Name is here without an extension */
int ha_myisam::open(const char *name, int mode, uint test_if_locked)
{
MI_KEYDEF *keyinfo;
MI_COLUMNDEF *recinfo= 0;
uint recs;
uint i;
/*
If the user wants to have memory mapped data files, add an
open_flag. Do not memory map temporary tables because they are
expected to be inserted and thus extended a lot. Memory mapping is
efficient for files that keep their size, but very inefficient for
growing files. Using an open_flag instead of calling mi_extra(...
HA_EXTRA_MMAP ...) after mi_open() has the advantage that the
mapping is not repeated for every open, but just done on the initial
    open, when the MyISAM share is created. Every time the server
    needs to open a new instance of a table it calls this method. We
will always supply HA_OPEN_MMAP for a permanent table. However, the
MyISAM storage engine will ignore this flag if this is a secondary
open of a table that is in use by other threads already (if the
MyISAM share exists already).
*/
if (!(test_if_locked & HA_OPEN_TMP_TABLE) && opt_myisam_use_mmap)
test_if_locked|= HA_OPEN_MMAP;
if (!(file=mi_open(name, mode, test_if_locked | HA_OPEN_FROM_SQL_LAYER)))
return (my_errno ? my_errno : -1);
if (!table->s->tmp_table) /* No need to perform a check for tmp table */
{
if ((my_errno= table2myisam(table, &keyinfo, &recinfo, &recs)))
{
/* purecov: begin inspected */
DBUG_PRINT("error", ("Failed to convert TABLE object to MyISAM "
"key and column definition"));
goto err;
/* purecov: end */
}
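    /*
      Verify that the key and column definitions derived from the TABLE
      object match what is stored in the MyISAM file header; a mismatch
      means the on-disk table no longer corresponds to the .frm definition.
    */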
if (check_definition(keyinfo, recinfo, table->s->keys, recs,
file->s->keyinfo, file->s->rec,
file->s->base.keys, file->s->base.fields,
true, table))
{
/* purecov: begin inspected */
my_errno= HA_ERR_CRASHED;
goto err;
/* purecov: end */
}
}
if (test_if_locked & (HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_TMP_TABLE))
(void) mi_extra(file, HA_EXTRA_NO_WAIT_LOCK, 0);
info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED))
(void) mi_extra(file, HA_EXTRA_WAIT_LOCK, 0);
if (!table->s->db_record_offset)
int_table_flags|=HA_REC_NOT_IN_SEQ;
if (file->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD))
int_table_flags|=HA_HAS_CHECKSUM;
for (i= 0; i < table->s->keys; i++)
{
plugin_ref parser= table->key_info[i].parser;
if (table->key_info[i].flags & HA_USES_PARSER)
file->s->keyinfo[i].parser=
(struct st_mysql_ftparser *)plugin_decl(parser)->info;
table->key_info[i].block_size= file->s->keyinfo[i].block_length;
}
my_errno= 0;
goto end;
err:
this->close();
end:
/*
Both recinfo and keydef are allocated by my_multi_malloc(), thus only
recinfo must be freed.
*/
if (recinfo)
my_free(recinfo);
return my_errno;
}
int ha_myisam::close(void)
{
MI_INFO *tmp=file;
file=0;
return mi_close(tmp);
}
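/*
  Insert a new row. Auto-set timestamp and auto_increment columns are
  filled in here before the row buffer is handed down to mi_write().
*/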
int ha_myisam::write_row(uchar *buf)
{
ha_statistic_increment(&SSV::ha_write_count);
/* If we have a timestamp column, update it to the current time */
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
/*
If we have an auto_increment column and we are writing a changed row
or a new row, then update the auto_increment value in the record.
*/
if (table->next_number_field && buf == table->record[0])
{
int error;
if ((error= update_auto_increment()))
return error;
}
return mi_write(file,buf);
}
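/*
  Implementation of CHECK TABLE: map the SQL-layer check options onto
  myisamchk-style test flags and run the chk_* routines on the open table.
*/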
int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
{
if (!file) return HA_ADMIN_INTERNAL_ERROR;
int error;
MI_CHECK param;
MYISAM_SHARE* share = file->s;
const char *old_proc_info=thd->proc_info;
thd_proc_info(thd, "Checking table");
myisamchk_init(&param);
param.thd = thd;
param.op_name = "check";
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.testflag = check_opt->flags | T_CHECK | T_SILENT;
param.stats_method= (enum_mi_stats_method)THDVAR(thd, stats_method);
if (!(table->db_stat & HA_READ_ONLY))
param.testflag|= T_STATISTICS;
param.using_global_keycache = 1;
if (!mi_is_crashed(file) &&
(((param.testflag & T_CHECK_ONLY_CHANGED) &&
!(share->state.changed & (STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR)) &&
share->state.open_count == 0) ||
((param.testflag & T_FAST) && (share->state.open_count ==
(uint) (share->global_changed ? 1 : 0)))))
return HA_ADMIN_ALREADY_DONE;
error = chk_status(&param, file); // Not fatal
error = chk_size(&param, file);
if (!error)
error |= chk_del(&param, file, param.testflag);
if (!error)
error = chk_key(&param, file);
if (!error)
{
if ((!(param.testflag & T_QUICK) &&
((share->options &
(HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ||
(param.testflag & (T_EXTEND | T_MEDIUM)))) ||
mi_is_crashed(file))
{
uint old_testflag=param.testflag;
param.testflag|=T_MEDIUM;
if (!(error= init_io_cache(&param.read_cache, file->dfile,
my_default_record_cache_size, READ_CACHE,
share->pack.header_length, 1, MYF(MY_WME))))
{
error= chk_data_link(&param, file, param.testflag & T_EXTEND);
end_io_cache(&(param.read_cache));
}
param.testflag= old_testflag;
}
}
if (!error)
{
if ((share->state.changed & (STATE_CHANGED |
STATE_CRASHED_ON_REPAIR |
STATE_CRASHED | STATE_NOT_ANALYZED)) ||
(param.testflag & T_STATISTICS) ||
mi_is_crashed(file))
{
file->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
mysql_mutex_lock(&share->intern_lock);
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR);
if (!(table->db_stat & HA_READ_ONLY))
error=update_state_info(&param,file,UPDATE_TIME | UPDATE_OPEN_COUNT |
UPDATE_STAT);
mysql_mutex_unlock(&share->intern_lock);
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_CONST);
}
}
else if (!mi_is_crashed(file) && !thd->killed)
{
mi_mark_crashed(file);
file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
}
thd_proc_info(thd, old_proc_info);
return error ? HA_ADMIN_CORRUPT : HA_ADMIN_OK;
}
/*
analyze the key distribution in the table
As the table may be only locked for read, we have to take into account that
two threads may do an analyze at the same time!
*/
int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt)
{
int error=0;
MI_CHECK param;
MYISAM_SHARE* share = file->s;
myisamchk_init(&param);
param.thd = thd;
param.op_name= "analyze";
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
T_DONT_CHECK_CHECKSUM);
param.using_global_keycache = 1;
param.stats_method= (enum_mi_stats_method)THDVAR(thd, stats_method);
if (!(share->state.changed & STATE_NOT_ANALYZED))
return HA_ADMIN_ALREADY_DONE;
error = chk_key(&param, file);
if (!error)
{
mysql_mutex_lock(&share->intern_lock);
error=update_state_info(&param,file,UPDATE_STAT);
mysql_mutex_unlock(&share->intern_lock);
}
else if (!mi_is_crashed(file) && !thd->killed)
mi_mark_crashed(file);
return error ? HA_ADMIN_CORRUPT : HA_ADMIN_OK;
}
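/*
  Entry point for REPAIR TABLE. If a repair attempt fails and a retry is
  possible, first drop the quick-repair flag and then fall back from
  repair-by-sort to the slower repair through the key cache.
*/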
int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt)
{
int error;
2000-07-31 21:29:14 +02:00
MI_CHECK param;
ha_rows start_records;
if (!file) return HA_ADMIN_INTERNAL_ERROR;
myisamchk_init(&param);
param.thd = thd;
param.op_name= "repair";
param.testflag= ((check_opt->flags & ~(T_EXTEND)) |
T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM |
(check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT));
param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
start_records=file->state->records;
while ((error=repair(thd,param,0)) && param.retry_repair)
{
param.retry_repair=0;
if (test_all_bits(param.testflag,
(uint) (T_RETRY_WITHOUT_QUICK | T_QUICK)))
{
param.testflag&= ~T_RETRY_WITHOUT_QUICK;
sql_print_information("Retrying repair of: '%s' without quick",
table->s->path.str);
continue;
}
param.testflag&= ~T_QUICK;
if ((param.testflag & T_REP_BY_SORT))
{
param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP;
sql_print_information("Retrying repair of: '%s' with keycache",
table->s->path.str);
continue;
}
break;
}
if (!error && start_records != file->state->records &&
!(check_opt->flags & T_VERY_SILENT))
{
char llbuff[22],llbuff2[22];
sql_print_information("Found %s of %s rows when repairing '%s'",
llstr(file->state->records, llbuff),
llstr(start_records, llbuff2),
table->s->path.str);
}
return error;
}
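/*
  Implementation of OPTIMIZE TABLE: repair by sort with index sorting and
  statistics collection, falling back to a plain repair if that fails.
*/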
int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
{
int error;
if (!file) return HA_ADMIN_INTERNAL_ERROR;
MI_CHECK param;
myisamchk_init(&param);
param.thd = thd;
param.op_name= "optimize";
param.testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE |
T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX);
param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
if ((error= repair(thd,param,1)) && param.retry_repair)
{
sql_print_warning("Warning: Optimize table got errno %d on %s.%s, retrying",
my_errno, param.db_name, param.table_name);
param.testflag&= ~T_REP_BY_SORT;
error= repair(thd,param,1);
}
return error;
}
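/*
  Shared worker for repair() and optimize(). Rebuilds data and/or index
  files as requested by param.testflag, then sorts index pages, updates
  statistics and refreshes the auto_increment value as needed.
*/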
int ha_myisam::repair(THD *thd, MI_CHECK &param, bool do_optimize)
{
int error=0;
uint local_testflag=param.testflag;
bool optimize_done= !do_optimize, statistics_done=0;
const char *old_proc_info=thd->proc_info;
char fixed_name[FN_REFLEN];
MYISAM_SHARE* share = file->s;
ha_rows rows= file->state->records;
DBUG_ENTER("ha_myisam::repair");
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.tmpfile_createflag = O_RDWR | O_TRUNC;
param.using_global_keycache = 1;
param.thd= thd;
param.tmpdir= &mysql_tmpdir_list;
param.out_flag= 0;
strmov(fixed_name,file->filename);
// Release latches since this can take a long time
ha_release_temporary_latches(thd);
// Don't lock tables if we have used LOCK TABLE
if (! thd->locked_tables_mode &&
mi_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK))
{
mi_check_print_error(&param,ER(ER_CANT_LOCK),my_errno);
DBUG_RETURN(HA_ADMIN_FAILED);
}
if (!do_optimize ||
((file->state->del || share->state.split != file->state->records) &&
(!(param.testflag & T_QUICK) ||
!(share->state.changed & STATE_NOT_OPTIMIZED_KEYS))))
{
ulonglong key_map= ((local_testflag & T_CREATE_MISSING_KEYS) ?
mi_get_mask_all_keys_active(share->base.keys) :
share->state.key_map);
uint testflag=param.testflag;
#ifdef HAVE_MMAP
bool remap= test(share->file_map);
/*
mi_repair*() functions family use file I/O even if memory
mapping is available.
Since mixing mmap I/O and file I/O may cause various artifacts,
memory mapping must be disabled.
*/
if (remap)
mi_munmap_file(file);
#endif
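    /*
      Prefer repair by sorting (possibly multi-threaded) when the whole
      table is rebuilt anyway; it recalculates index statistics as a side
      effect, so no separate analyze pass is needed.
    */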
if (mi_test_if_sort_rep(file,file->state->records,key_map,0) &&
(local_testflag & T_REP_BY_SORT))
{
local_testflag|= T_STATISTICS;
param.testflag|= T_STATISTICS; // We get this for free
statistics_done=1;
if (THDVAR(thd, repair_threads)>1)
{
char buf[40];
/* TODO: respect myisam_repair_threads variable */
my_snprintf(buf, 40, "Repair with %d threads", my_count_bits(key_map));
thd_proc_info(thd, buf);
error = mi_repair_parallel(&param, file, fixed_name,
param.testflag & T_QUICK);
thd_proc_info(thd, "Repair done"); // to reset proc_info, as
// it was pointing to local buffer
}
else
{
thd_proc_info(thd, "Repair by sorting");
error = mi_repair_by_sort(&param, file, fixed_name,
param.testflag & T_QUICK);
}
}
else
{
thd_proc_info(thd, "Repair with keycache");
param.testflag &= ~T_REP_BY_SORT;
error= mi_repair(&param, file, fixed_name,
param.testflag & T_QUICK);
}
#ifdef HAVE_MMAP
if (remap)
mi_dynmap_file(file, file->state->data_file_length);
#endif
param.testflag=testflag;
optimize_done=1;
}
if (!error)
{
if ((local_testflag & T_SORT_INDEX) &&
(share->state.changed & STATE_NOT_SORTED_PAGES))
{
optimize_done=1;
thd_proc_info(thd, "Sorting index");
error=mi_sort_index(&param,file,fixed_name);
}
if (!statistics_done && (local_testflag & T_STATISTICS))
{
if (share->state.changed & STATE_NOT_ANALYZED)
{
optimize_done=1;
thd_proc_info(thd, "Analyzing");
error = chk_key(&param, file);
}
else
local_testflag&= ~T_STATISTICS; // Don't update statistics
}
}
thd_proc_info(thd, "Saving state");
if (!error)
{
if ((share->state.changed & STATE_CHANGED) || mi_is_crashed(file))
{
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR);
file->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
}
/*
the following 'if', though conceptually wrong,
is a useful optimization nevertheless.
*/
if (file->state != &file->s->state.state)
file->s->state.state = *file->state;
if (file->s->base.auto_key)
update_auto_increment_key(&param, file, 1);
if (optimize_done)
error = update_state_info(&param, file,
UPDATE_TIME | UPDATE_OPEN_COUNT |
(local_testflag &
T_STATISTICS ? UPDATE_STAT : 0));
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_CONST);
if (rows != file->state->records && ! (param.testflag & T_VERY_SILENT))
{
char llbuff[22],llbuff2[22];
mi_check_print_warning(&param,"Number of rows changed from %s to %s",
llstr(rows,llbuff),
llstr(file->state->records,llbuff2));
}
}
else
{
mi_mark_crashed_on_repair(file);
file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
update_state_info(&param, file, 0);
}
thd_proc_info(thd, old_proc_info);
if (! thd->locked_tables_mode)
mi_lock_database(file,F_UNLCK);
DBUG_RETURN(error ? HA_ADMIN_FAILED :
!optimize_done ? HA_ADMIN_ALREADY_DONE : HA_ADMIN_OK);
}
/*
Assign table indexes to a specific key cache.
*/
int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
{
KEY_CACHE *new_key_cache= check_opt->key_cache;
const char *errmsg= 0;
int error= HA_ADMIN_OK;
ulonglong map;
TABLE_LIST *table_list= table->pos_in_table_list;
DBUG_ENTER("ha_myisam::assign_to_keycache");
table->keys_in_use_for_query.clear_all();
if (table_list->process_index_hints(table))
DBUG_RETURN(HA_ADMIN_FAILED);
map= ~(ulonglong) 0;
if (!table->keys_in_use_for_query.is_clear_all())
/* use all keys if the user specified no list through index hints */
map= table->keys_in_use_for_query.to_ulonglong();
if ((error= mi_assign_to_key_cache(file, map, new_key_cache)))
{
char buf[STRING_BUFFER_USUAL_SIZE];
my_snprintf(buf, sizeof(buf),
"Failed to flush to index file (errno: %d)", error);
errmsg= buf;
error= HA_ADMIN_CORRUPT;
}
if (error != HA_ADMIN_OK)
{
/* Send error to user */
MI_CHECK param;
myisamchk_init(&param);
param.thd= thd;
param.op_name= "assign_to_keycache";
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag= 0;
mi_check_print_error(&param, errmsg);
}
DBUG_RETURN(error);
}
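/*
  Editor's note, an illustrative sketch rather than an authoritative claim:
  given the key_cache taken from HA_CHECK_OPT and the index hints processed
  above, this method appears to serve the SQL-level CACHE INDEX statement,
  e.g. for a hypothetical MyISAM table t1 and key cache hot_cache:

    SET GLOBAL hot_cache.key_buffer_size= 128*1024;
    CACHE INDEX t1 INDEX (k1, k2) IN hot_cache;

  With no index list in the statement, the map stays ~0 and all of the
  table's keys are assigned to the named cache.
*/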
/*
Preload pages of the index file for a table into the key cache.
*/
int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
{
int error;
const char *errmsg;
ulonglong map;
TABLE_LIST *table_list= table->pos_in_table_list;
my_bool ignore_leaves= table_list->ignore_leaves;
char buf[MYSQL_ERRMSG_SIZE];
DBUG_ENTER("ha_myisam::preload_keys");
table->keys_in_use_for_query.clear_all();
if (table_list->process_index_hints(table))
DBUG_RETURN(HA_ADMIN_FAILED);
map= ~(ulonglong) 0;
/* Check validity of the index references */
if (!table->keys_in_use_for_query.is_clear_all())
/* use all keys if the user specified no list through index hints */
map= table->keys_in_use_for_query.to_ulonglong();
mi_extra(file, HA_EXTRA_PRELOAD_BUFFER_SIZE,
(void *) &thd->variables.preload_buff_size);
if ((error= mi_preload(file, map, ignore_leaves)))
{
switch (error) {
case HA_ERR_NON_UNIQUE_BLOCK_SIZE:
errmsg= "Indexes use different block sizes";
break;
case HA_ERR_OUT_OF_MEM:
errmsg= "Failed to allocate buffer";
break;
default:
my_snprintf(buf, sizeof(buf),
"Failed to read from index file (errno: %d)", my_errno);
errmsg= buf;
}
error= HA_ADMIN_FAILED;
goto err;
}
DBUG_RETURN(HA_ADMIN_OK);
err:
{
MI_CHECK param;
myisamchk_init(&param);
param.thd= thd;
param.op_name= "preload_keys";
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag= 0;
mi_check_print_error(&param, errmsg);
DBUG_RETURN(error);
}
}
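/*
  Editor's note, an illustrative sketch rather than an authoritative claim:
  judging by the ignore_leaves flag taken from TABLE_LIST, this is the
  handler side of LOAD INDEX INTO CACHE, e.g. for a hypothetical table t1:

    LOAD INDEX INTO CACHE t1 IGNORE LEAVES;

  IGNORE LEAVES asks mi_preload() to skip index leaf blocks; the read
  buffer size comes from the session's preload_buff_size, passed through
  HA_EXTRA_PRELOAD_BUFFER_SIZE above.
*/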
/*
Disable indexes, making the change persistent if requested.
SYNOPSIS
disable_indexes()
mode mode of operation:
HA_KEY_SWITCH_NONUNIQ disable all non-unique keys
HA_KEY_SWITCH_ALL disable all keys
HA_KEY_SWITCH_NONUNIQ_SAVE disable non-unique keys and make persistent
HA_KEY_SWITCH_ALL_SAVE disable all keys and make persistent
IMPLEMENTATION
HA_KEY_SWITCH_NONUNIQ is not implemented.
HA_KEY_SWITCH_ALL_SAVE is not implemented.
RETURN
0 ok
HA_ERR_WRONG_COMMAND mode not implemented.
*/
int ha_myisam::disable_indexes(uint mode)
{
int error;
if (mode == HA_KEY_SWITCH_ALL)
{
/* call a storage engine function to switch the key map */
error= mi_disable_indexes(file);
}
else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE)
{
mi_extra(file, HA_EXTRA_NO_KEYS, 0);
info(HA_STATUS_CONST); // Read new key info
error= 0;
}
else
{
/* mode not implemented */
error= HA_ERR_WRONG_COMMAND;
}
return error;
}
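/*
  Editor's note, a hedged sketch: HA_KEY_SWITCH_NONUNIQ_SAVE is the mode a
  bulk-load sequence such as

    ALTER TABLE t1 DISABLE KEYS;
    ... many INSERTs ...
    ALTER TABLE t1 ENABLE KEYS;

  is expected to exercise (t1 being a hypothetical MyISAM table). The
  HA_EXTRA_NO_KEYS call records the change in the index file, while
  HA_KEY_SWITCH_ALL merely switches the key map without the persistence
  that the _SAVE modes promise.
*/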
/*
Enable indexes, making the change persistent if requested.
SYNOPSIS
enable_indexes()
mode mode of operation:
HA_KEY_SWITCH_NONUNIQ enable all non-unique keys
HA_KEY_SWITCH_ALL enable all keys
HA_KEY_SWITCH_NONUNIQ_SAVE enable non-unique keys and make persistent
HA_KEY_SWITCH_ALL_SAVE enable all keys and make persistent
DESCRIPTION
Enable indexes, which might have been disabled by disable_indexes() before.
The modes without _SAVE work only if both data and indexes are empty,
since the MyISAM repair would enable them persistently.
To be safe in these cases, call handler::delete_all_rows() beforehand.
IMPLEMENTATION
HA_KEY_SWITCH_NONUNIQ is not implemented.
HA_KEY_SWITCH_ALL_SAVE is not implemented.
RETURN
0 ok
!=0 Error, among others:
HA_ERR_CRASHED data or index is non-empty. Delete all rows and retry.
HA_ERR_WRONG_COMMAND mode not implemented.
*/
int ha_myisam::enable_indexes(uint mode)
{
int error;
DBUG_EXECUTE_IF("wait_in_enable_indexes",
debug_wait_for_kill("wait_in_enable_indexes"); );
if (mi_is_all_keys_active(file->s->state.key_map, file->s->base.keys))
{
/* All indexes are enabled already. */
return 0;
}
if (mode == HA_KEY_SWITCH_ALL)
{
error= mi_enable_indexes(file);
/*
Do not try to repair on error,
as this could make the enabled state persistent,
but mode==HA_KEY_SWITCH_ALL forbids it.
*/
}
else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE)
{
THD *thd=current_thd;
MI_CHECK param;
const char *save_proc_info=thd->proc_info;
thd_proc_info(thd, "Creating index");
myisamchk_init(&param);
param.op_name= "recreating_index";
param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS);
param.myf_rw&= ~MY_WAIT_IF_FULL;
param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
param.stats_method= (enum_mi_stats_method)THDVAR(thd, stats_method);
param.tmpdir=&mysql_tmpdir_list;
if ((error= (repair(thd,param,0) != HA_ADMIN_OK)) && param.retry_repair)
{
sql_print_warning("Warning: Enabling keys got errno %d on %s.%s, retrying",
my_errno, param.db_name, param.table_name);
/*
Repairing by sort failed. Now try standard repair method.
Still we want to fix only index file. If data file corruption
was detected (T_RETRY_WITHOUT_QUICK), we shouldn't do much here.
Let implicit repair do this job.
*/
if (!(param.testflag & T_RETRY_WITHOUT_QUICK))
{
param.testflag&= ~T_REP_BY_SORT;
error= (repair(thd,param,0) != HA_ADMIN_OK);
}
/*
If the standard repair succeeded, clear all error messages which
might have been set by the first repair. They can still be seen
with SHOW WARNINGS then.
*/
if (! error)
thd->clear_error();
}
info(HA_STATUS_CONST);
thd_proc_info(thd, save_proc_info);
}
else
{
/* mode not implemented */
error= HA_ERR_WRONG_COMMAND;
}
return error;
}
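/*
  Editor's note, a hedged sketch: the rebuild done by the
  HA_KEY_SWITCH_NONUNIQ_SAVE branch takes its sort buffer size and
  statistics method from the MyISAM session variables (the THDVAR calls
  above), so it can be tuned per connection, e.g.

    SET SESSION myisam_sort_buffer_size= 256*1024*1024;
    SET SESSION myisam_stats_method= 'nulls_ignored';
    ALTER TABLE t1 ENABLE KEYS;    -- t1 is a hypothetical MyISAM table

  If repair-by-sort fails for anything other than data-file corruption, the
  code falls back to the standard repair method, still restricted to the
  index file (T_QUICK).
*/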
/*
Test if indexes are disabled.
SYNOPSIS
indexes_are_disabled()
no parameters
RETURN
0 indexes are not disabled
1 all indexes are disabled
[2 non-unique indexes are disabled - NOT YET IMPLEMENTED]
*/
int ha_myisam::indexes_are_disabled(void)
{
return mi_indexes_are_disabled(file);
}
/*
prepare for a many-rows insert operation
e.g. - disable indexes (if they can be recreated fast) or
activate special bulk-insert optimizations
SYNOPSIS
start_bulk_insert(rows)
rows Rows to be inserted
0 if we don't know
NOTICE
Do not forget to call end_bulk_insert() later!
*/
void ha_myisam::start_bulk_insert(ha_rows rows)
{
DBUG_ENTER("ha_myisam::start_bulk_insert");
THD *thd= current_thd;
ulong size= min(thd->variables.read_buff_size,
(ulong) (table->s->avg_row_length*rows));
DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
(ulong) rows, size));
/* don't enable row cache if too few rows */
if (! rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE))
mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size);
can_enable_indexes= mi_is_all_keys_active(file->s->state.key_map,
file->s->base.keys);
if (!(specialflag & SPECIAL_SAFE_MODE))
{
/*
Only disable old index if the table was empty and we are inserting
a lot of rows.
Note that in end_bulk_insert() we may truncate the table if
enable_indexes() failed, thus it's essential that indexes are
disabled ONLY for an empty table.
*/
if (file->state->records == 0 && can_enable_indexes &&
(!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES))
mi_disable_non_unique_index(file,rows);
else
if (!file->bulk_insert &&
(!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT))
{
mi_init_bulk_insert(file, thd->variables.bulk_insert_buff_size, rows);
}
}
DBUG_VOID_RETURN;
}
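/*
  Editor's sketch of the expected call sequence; 'h' is a hypothetical
  handler pointer, not code from this file:

    h->start_bulk_insert(estimated_rows);  // row cache, plus either disabled
                                           // non-unique keys or a bulk-insert
                                           // tree, as decided above
    while (more_rows)
      h->write_row(record);
    h->end_bulk_insert();                  // flushes the cache and re-enables
                                           // keys, see below

  Indexes are disabled only when the table starts out empty, which is what
  allows end_bulk_insert() to truncate the table if the key rebuild is
  killed.
*/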
/*
end special bulk-insert optimizations,
which have been activated by start_bulk_insert().
SYNOPSIS
end_bulk_insert()
no arguments
RETURN
0 OK
!= 0 Error
*/
int ha_myisam::end_bulk_insert()
{
mi_end_bulk_insert(file);
int err=mi_extra(file, HA_EXTRA_NO_CACHE, 0);
if (!err)
{
if (can_enable_indexes)
{
/*
Truncate the table when the enable-indexes operation is killed.
After truncating the table we don't need to enable the
indexes, because the last repair operation is aborted after
setting the indexes as active and trying to recreate them.
*/
if (((err= enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE)) != 0) &&
current_thd->killed)
{
delete_all_rows();
/* not crashed, despite being killed during repair */
file->s->state.changed&= ~(STATE_CRASHED|STATE_CRASHED_ON_REPAIR);
}
}
}
return err;
}
bool ha_myisam::check_and_repair(THD *thd)
{
int error=0;
int marked_crashed;
HA_CHECK_OPT check_opt;
DBUG_ENTER("ha_myisam::check_and_repair");
check_opt.init();
check_opt.flags= T_MEDIUM | T_AUTO_REPAIR;
// Don't use quick if deleted rows
if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK))
check_opt.flags|=T_QUICK;
sql_print_warning("Checking table: '%s'",table->s->path.str);
const CSET_STRING query_backup= thd->query_string;
thd->set_query(table->s->table_name.str,
(uint) table->s->table_name.length, system_charset_info);
if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt))
{
sql_print_warning("Recovering table: '%s'",table->s->path.str);
check_opt.flags=
((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) |
(marked_crashed ? 0 : T_QUICK) |
(myisam_recover_options & HA_RECOVER_FORCE ? 0 : T_SAFE_REPAIR) |
T_AUTO_REPAIR);
if (repair(thd, &check_opt))
error=1;
}
thd->set_query(query_backup);
DBUG_RETURN(error);
}
bool ha_myisam::is_crashed() const
{
return (file->s->state.changed & STATE_CRASHED ||
(my_disable_locking && file->s->state.open_count));
}
int ha_myisam::update_row(const uchar *old_data, uchar *new_data)
{
ha_statistic_increment(&SSV::ha_update_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
return mi_update(file,old_data,new_data);
}
int ha_myisam::delete_row(const uchar *buf)
{
ha_statistic_increment(&SSV::ha_delete_count);
return mi_delete(file,buf);
}
int ha_myisam::index_read_map(uchar *buf, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
DBUG_ASSERT(inited==INDEX);
ha_statistic_increment(&SSV::ha_read_key_count);
int error=mi_rkey(file, buf, active_index, key, keypart_map, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
return error;
}
int ha_myisam::index_read_idx_map(uchar *buf, uint index, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
ha_statistic_increment(&SSV::ha_read_key_count);
int error=mi_rkey(file, buf, index, key, keypart_map, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
return error;
}
int ha_myisam::index_read_last_map(uchar *buf, const uchar *key,
key_part_map keypart_map)
{
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
DBUG_ENTER("ha_myisam::index_read_last");
DBUG_ASSERT(inited==INDEX);
ha_statistic_increment(&SSV::ha_read_key_count);
int error=mi_rkey(file, buf, active_index, key, keypart_map,
HA_READ_PREFIX_LAST);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
DBUG_RETURN(error);
}
int ha_myisam::index_next(uchar *buf)
{
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
DBUG_ASSERT(inited==INDEX);
ha_statistic_increment(&SSV::ha_read_next_count);
int error=mi_rnext(file,buf,active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
return error;
}
int ha_myisam::index_prev(uchar *buf)
{
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
DBUG_ASSERT(inited==INDEX);
ha_statistic_increment(&SSV::ha_read_prev_count);
int error=mi_rprev(file,buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
return error;
}
int ha_myisam::index_first(uchar *buf)
{
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
DBUG_ASSERT(inited==INDEX);
ha_statistic_increment(&SSV::ha_read_first_count);
int error=mi_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
return error;
}
int ha_myisam::index_last(uchar *buf)
{
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
DBUG_ASSERT(inited==INDEX);
ha_statistic_increment(&SSV::ha_read_last_count);
int error=mi_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
return error;
}
int ha_myisam::index_next_same(uchar *buf,
const uchar *key __attribute__((unused)),
uint length __attribute__((unused)))
{
int error;
DBUG_ASSERT(inited==INDEX);
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
ha_statistic_increment(&SSV::ha_read_next_count);
do
{
error= mi_rnext_same(file,buf);
} while (error == HA_ERR_RECORD_DELETED);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_INDEX_READ_ROW_DONE(error);
return error;
}
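
/*
  Prepare for a full table scan (scan == TRUE) or for a sequence of
  rnd_pos() calls (scan == FALSE). In the second case mi_reset() only
  frees cached buffers; no scan state is set up.
*/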
int ha_myisam::rnd_init(bool scan)
{
if (scan)
return mi_scan_init(file);
return mi_reset(file); // Free buffers
}
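
/*
  Read the next row in data-file order during a table scan started with
  rnd_init(TRUE).
*/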
int ha_myisam::rnd_next(uchar *buf)
{
MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str,
TRUE);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
int error=mi_scan(file, buf);
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_READ_ROW_DONE(error);
return error;
}
int ha_myisam::restart_rnd_next(uchar *buf, uchar *pos)
{
return rnd_pos(buf,pos);
}
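
/*
  Read the row stored at position 'pos'. The position is normally one
  saved earlier by position(); my_get_ptr() unpacks it back into the
  my_off_t offset that mi_rrnd() expects.
*/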
int ha_myisam::rnd_pos(uchar *buf, uchar *pos)
{
MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str,
FALSE);
ha_statistic_increment(&SSV::ha_read_rnd_count);
int error=mi_rrnd(file, buf, my_get_ptr(pos,ref_length));
table->status=error ? STATUS_NOT_FOUND: 0;
MYSQL_READ_ROW_DONE(error);
return error;
}
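
/*
  Remember the position of the last row read so that it can be fetched
  again with rnd_pos(). The offset returned by mi_position() is packed
  into handler::ref with my_store_ptr().

  A rough sketch of the calling pattern (buffer names are illustrative
  only, and the upper layer normally goes through the ha_*() wrappers):

    h->position(record);                      // row offset -> h->ref
    memcpy(saved_ref, h->ref, h->ref_length); // keep it for later
    ...
    h->rnd_pos(record, saved_ref);            // re-read the same row
*/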
void ha_myisam::position(const uchar *record)
{
my_off_t row_position= mi_position(file);
my_store_ptr(ref, ref_length, row_position);
}
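
/*
  Refresh the statistics requested by 'flag':
    HA_STATUS_VARIABLE  row counts, file lengths and related counters
    HA_STATUS_CONST     limits, key distribution, block size and the
                        data/index file names
    HA_STATUS_ERRKEY    key number and position of the last duplicate-key
                        error
    HA_STATUS_TIME      last update time
    HA_STATUS_AUTO      current auto_increment value
*/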
int ha_myisam::info(uint flag)
{
MI_ISAMINFO misam_info;
char name_buff[FN_REFLEN];
(void) mi_status(file,&misam_info,flag);
if (flag & HA_STATUS_VARIABLE)
{
stats.records= misam_info.records;
stats.deleted= misam_info.deleted;
stats.data_file_length= misam_info.data_file_length;
stats.index_file_length= misam_info.index_file_length;
stats.delete_length= misam_info.delete_length;
stats.check_time= (ulong) misam_info.check_time;
stats.mean_rec_length= misam_info.mean_reclength;
}
if (flag & HA_STATUS_CONST)
{
TABLE_SHARE *share= table->s;
stats.max_data_file_length= misam_info.max_data_file_length;
stats.max_index_file_length= misam_info.max_index_file_length;
stats.create_time= (ulong) misam_info.create_time;
ref_length= misam_info.reflength;
share->db_options_in_use= misam_info.options;
stats.block_size= myisam_block_size; /* record block size */
/* Update share */
if (share->tmp_table == NO_TMP_TABLE)
mysql_mutex_lock(&share->LOCK_ha_data);
share->keys_in_use.set_prefix(share->keys);
share->keys_in_use.intersect_extended(misam_info.key_map);
share->keys_for_keyread.intersect(share->keys_in_use);
share->db_record_offset= misam_info.record_offset;
if (share->key_parts)
memcpy((char*) table->key_info[0].rec_per_key,
(char*) misam_info.rec_per_key,
sizeof(table->key_info[0].rec_per_key[0])*share->key_parts);
if (share->tmp_table == NO_TMP_TABLE)
mysql_mutex_unlock(&share->LOCK_ha_data);
/*
Set data_file_name and index_file_name to point at the symlink value
      if the table is symlinked (i.e. the real name is not the same as the generated name)
*/
data_file_name= index_file_name= 0;
fn_format(name_buff, file->filename, "", MI_NAME_DEXT,
MY_APPEND_EXT | MY_UNPACK_FILENAME);
if (strcmp(name_buff, misam_info.data_file_name))
data_file_name=misam_info.data_file_name;
fn_format(name_buff, file->filename, "", MI_NAME_IEXT,
MY_APPEND_EXT | MY_UNPACK_FILENAME);
if (strcmp(name_buff, misam_info.index_file_name))
index_file_name=misam_info.index_file_name;
}
if (flag & HA_STATUS_ERRKEY)
{
errkey = misam_info.errkey;
my_store_ptr(dup_ref, ref_length, misam_info.dupp_key_pos);
}
if (flag & HA_STATUS_TIME)
stats.update_time = (ulong) misam_info.update_time;
if (flag & HA_STATUS_AUTO)
stats.auto_increment_value= misam_info.auto_increment;
return 0;
}
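
/*
  Forward extra() hints to MyISAM via mi_extra(). HA_EXTRA_KEYREAD is
  ignored when the server runs in safe mode, and HA_EXTRA_MMAP is ignored
  unless memory-mapped access is enabled (opt_myisam_use_mmap).
*/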
int ha_myisam::extra(enum ha_extra_function operation)
{
if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_KEYREAD)
return 0;
if (operation == HA_EXTRA_MMAP && !opt_myisam_use_mmap)
return 0;
return mi_extra(file, operation, 0);
}
int ha_myisam::reset(void)
{
return mi_reset(file);
}
/* To be used with WRITE_CACHE and EXTRA_CACHE */
int ha_myisam::extra_opt(enum ha_extra_function operation, ulong cache_size)
{
if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_WRITE_CACHE)
return 0;
return mi_extra(file, operation, (void*) &cache_size);
}
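
/* Remove every row from the table in a single engine call */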
int ha_myisam::delete_all_rows()
{
return mi_delete_all_rows(file);
}
/*
Intended to support partitioning.
Allows a particular partition to be truncated.
*/
int ha_myisam::truncate()
{
int error= delete_all_rows();
return error ? error : reset_auto_increment(0);
}
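
/* Set the table's auto_increment counter to 'value' */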
int ha_myisam::reset_auto_increment(ulonglong value)
{
file->s->state.auto_increment= value;
return 0;
}
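
/* Remove the MyISAM files belonging to the table 'name' */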
int ha_myisam::delete_table(const char *name)
{
return mi_delete_table(name);
}
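
/*
  Take or release the MyISAM lock for this statement and remember which THD
  is using the handle. Temporary tables, being private to one connection,
  are locked with F_EXTRA_LCK instead of a normal lock.
*/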
int ha_myisam::external_lock(THD *thd, int lock_type)
{
file->in_use.data= thd;
return mi_lock_database(file, !table->s->tmp_table ?
lock_type : ((lock_type == F_UNLCK) ?
F_UNLCK : F_EXTRA_LCK));
}
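
/*
  Give the SQL layer a pointer to our THR_LOCK_DATA. The requested lock type
  is stored only when no lock has been set yet and the request is not
  TL_IGNORE.
*/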
THR_LOCK_DATA **ha_myisam::store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type)
{
if (lock_type != TL_IGNORE && file->lock.type == TL_UNLOCK)
file->lock.type=lock_type;
*to++= &file->lock;
return to;
}
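
/*
  Report engine-specific create information back to the server: the current
  auto_increment value (unless the statement specified one) and the data and
  index file names.
*/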
void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
{
ha_myisam::info(HA_STATUS_AUTO | HA_STATUS_CONST);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
{
create_info->auto_increment_value= stats.auto_increment_value;
}
create_info->data_file_name=data_file_name;
create_info->index_file_name=index_file_name;
}
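
/*
  Create a new MyISAM table: translate the server's TABLE definition into
  MI_KEYDEF/MI_COLUMNDEF arrays with table2myisam() and pass them, together
  with the applicable create flags, to mi_create().
*/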
int ha_myisam::create(const char *name, register TABLE *table_arg,
HA_CREATE_INFO *ha_create_info)
{
int error;
uint create_flags= 0, records, i;
char buff[FN_REFLEN];
MI_KEYDEF *keydef;
MI_COLUMNDEF *recinfo;
MI_CREATE_INFO create_info;
TABLE_SHARE *share= table_arg->s;
uint options= share->db_options_in_use;
DBUG_ENTER("ha_myisam::create");
for (i= 0; i < share->keys; i++)
{
if (table_arg->key_info[i].flags & HA_USES_PARSER)
{
create_flags|= HA_CREATE_RELIES_ON_SQL_LAYER;
break;
}
}
if ((error= table2myisam(table_arg, &keydef, &recinfo, &records)))
DBUG_RETURN(error); /* purecov: inspected */
bzero((char*) &create_info, sizeof(create_info));
create_info.max_rows= share->max_rows;
create_info.reloc_rows= share->min_rows;
create_info.with_auto_increment= share->next_number_key_offset == 0;
create_info.auto_increment= (ha_create_info->auto_increment_value ?
ha_create_info->auto_increment_value -1 :
(ulonglong) 0);
create_info.data_file_length= ((ulonglong) share->max_rows *
share->avg_row_length);
create_info.data_file_name= ha_create_info->data_file_name;
create_info.index_file_name= ha_create_info->index_file_name;
create_info.language= share->table_charset->number;
if (ha_create_info->options & HA_LEX_CREATE_TMP_TABLE)
create_flags|= HA_CREATE_TMP_TABLE;
if (ha_create_info->options & HA_CREATE_KEEP_FILES)
create_flags|= HA_CREATE_KEEP_FILES;
if (options & HA_OPTION_PACK_RECORD)
create_flags|= HA_PACK_RECORD;
if (options & HA_OPTION_CHECKSUM)
create_flags|= HA_CREATE_CHECKSUM;
if (options & HA_OPTION_DELAY_KEY_WRITE)
create_flags|= HA_CREATE_DELAY_KEY_WRITE;
/* TODO: Check that the following fn_format is really needed */
error= mi_create(fn_format(buff, name, "", "",
MY_UNPACK_FILENAME|MY_APPEND_EXT),
share->keys, keydef,
records, recinfo,
0, (MI_UNIQUEDEF*) 0,
&create_info, create_flags);
my_free(recinfo);
DBUG_RETURN(error);
}
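
/* Rename the MyISAM data and index files */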
int ha_myisam::rename_table(const char * from, const char * to)
{
return mi_rename(from,to);
}
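
/*
  Reserve auto_increment values for an insert. When the auto_increment column
  is first in its key the cached value can be returned and the whole range
  reserved (MyISAM holds a table lock anyway); otherwise the last value used
  for the current key prefix is read from the index with mi_rkey().
*/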
void ha_myisam::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
ulonglong *nb_reserved_values)
{
ulonglong nr;
int error;
uchar key[MI_MAX_KEY_LENGTH];
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
ha_myisam::info(HA_STATUS_AUTO);
*first_value= stats.auto_increment_value;
/* MyISAM has only table-level lock, so reserves to +inf */
*nb_reserved_values= ULONGLONG_MAX;
return;
}
/* it's safe to call the following if bulk_insert isn't on */
mi_flush_bulk_insert(file, table->s->next_number_index);
(void) extra(HA_EXTRA_KEYREAD);
key_copy(key, table->record[0],
table->key_info + table->s->next_number_index,
table->s->next_number_key_offset);
error= mi_rkey(file, table->record[1], (int) table->s->next_number_index,
key, make_prev_keypart_map(table->s->next_number_keypart),
HA_READ_PREFIX_LAST);
if (error)
nr= 1;
else
{
/* Get data from record[1] */
nr= ((ulonglong) table->next_number_field->
val_int_offset(table->s->rec_buff_length)+1);
}
extra(HA_EXTRA_NO_KEYREAD);
*first_value= nr;
/*
MySQL needs to call us for next row: assume we are inserting ("a",null)
here, we return 3, and next this statement will want to insert ("b",null):
there is no reason why ("b",3+1) would be the good row to insert: maybe it
already exists, maybe 3+1 is too large...
*/
*nb_reserved_values= 1;
}
/*
  Find out how many rows there are in the given range

  SYNOPSIS
    records_in_range()
    inx          Index to use
    min_key      Start of range. Null pointer if from first key
    max_key      End of range. Null pointer if to last key

  NOTES
    min_key.flag can have one of the following values:
      HA_READ_KEY_EXACT    Include the key in the range
      HA_READ_AFTER_KEY    Don't include key in range

    max_key.flag can have one of the following values:
      HA_READ_BEFORE_KEY   Don't include key in range
      HA_READ_AFTER_KEY    Include all 'end_key' values in the range

  RETURN
    HA_POS_ERROR           Something is wrong with the index tree.
    0                      There are no matching keys in the given range
    number > 0             There are approximately 'number' matching rows
                           in the range
*/
ha_rows ha_myisam::records_in_range(uint inx, key_range *min_key,
key_range *max_key)
{
return (ha_rows) mi_records_in_range(file, (int) inx, min_key, max_key);
}
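
/* Read the next row matching an active full-text search */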
int ha_myisam::ft_read(uchar *buf)
{
int error;
if (!ft_handler)
return -1;
thread_safe_increment(table->in_use->status_var.ha_read_next_count,
&LOCK_status); // why ?
error=ft_handler->please->read_next(ft_handler,(char*) buf);
table->status=error ? STATUS_NOT_FOUND: 0;
return error;
}
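
/* Return the table checksum kept in the MyISAM state information */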
uint ha_myisam::checksum() const
{
return (uint)file->state->checksum;
}
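
/*
  Check whether existing table data is compatible with an altered definition,
  so ALTER TABLE can decide whether the table has to be rebuilt.
*/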
bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
uint options= table->s->db_options_in_use;
2006-06-04 17:52:22 +02:00
if (info->auto_increment_value != stats.auto_increment_value ||
info->data_file_name != data_file_name ||
info->index_file_name != index_file_name ||
table_changes == IS_EQUAL_NO ||
table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet
return COMPATIBLE_DATA_NO;
if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM |
HA_OPTION_DELAY_KEY_WRITE)) !=
(info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM |
HA_OPTION_DELAY_KEY_WRITE)))
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
extern int mi_panic(enum ha_panic_function flag);
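/*
  Handlerton panic hook: forwards the requested action (flush, close or
  reopen all open MyISAM tables) to the MyISAM library's mi_panic().
*/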
int myisam_panic(handlerton *hton, ha_panic_function flag)
{
return mi_panic(flag);
}
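/*
  Plugin initialization, run once when the MyISAM plugin is loaded:
  applies the startup options below and fills in the handlerton.
*/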
static int myisam_init(void *p)
{
handlerton *myisam_hton;
#ifdef HAVE_PSI_INTERFACE
init_myisam_psi_keys();
#endif
/* Set global variables based on startup options */
if (myisam_recover_options)
ha_open_options|=HA_OPEN_ABORT_IF_CRASHED;
else
myisam_recover_options= HA_RECOVER_OFF;
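  /*
    Round the configured block size down to a power of two
    (my_bit_log2() returns the position of the highest set bit).
  */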
myisam_block_size=(uint) 1 << my_bit_log2(opt_myisam_block_size);
myisam_hton= (handlerton *)p;
myisam_hton->state= SHOW_OPTION_YES;
myisam_hton->db_type= DB_TYPE_MYISAM;
myisam_hton->create= myisam_create_handler;
myisam_hton->panic= myisam_panic;
myisam_hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES;
return 0;
}
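/*
  System variables registered by this plugin; they are exposed with the
  "myisam_" prefix (myisam_block_size, myisam_recover_options, ...).
*/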
static struct st_mysql_sys_var* myisam_sysvars[]= {
MYSQL_SYSVAR(block_size),
MYSQL_SYSVAR(data_pointer_size),
MYSQL_SYSVAR(max_sort_file_size),
MYSQL_SYSVAR(recover_options),
MYSQL_SYSVAR(repair_threads),
MYSQL_SYSVAR(sort_buffer_size),
MYSQL_SYSVAR(use_mmap),
MYSQL_SYSVAR(mmap_size),
MYSQL_SYSVAR(stats_method),
0
};
struct st_mysql_storage_engine myisam_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
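/* Plugin descriptor for the MyISAM storage engine. */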
mysql_declare_plugin(myisam)
{
MYSQL_STORAGE_ENGINE_PLUGIN,
&myisam_storage_engine,
"MyISAM",
"MySQL AB",
"MyISAM storage engine",
PLUGIN_LICENSE_GPL,
myisam_init, /* Plugin Init */
NULL, /* Plugin Deinit */
0x0100, /* 1.0 */
NULL, /* status variables */
myisam_sysvars, /* system variables */
NULL, /* config options */
0, /* flags */
}
mysql_declare_plugin_end;
#ifdef HAVE_QUERY_CACHE
/**
  @brief Register a named table with a call back function to the query cache.

  @param thd             The thread handle
  @param table_name      A pointer to the table name as kept in the table cache
  @param table_name_len  The length of the table name
  @param[out] engine_callback  The storage engine call back function;
                               always set to 0, as MyISAM needs no callback
  @param[out] engine_data      Engine data; always set to 0

  @note Despite the name of this function, it is used to check each statement
        before it is cached and not to register a table or callback function.

  @see handler::register_query_cache_table

  @return Whether the current statement may be cached. engine_data and
          engine_callback are always set to 0.
    @retval TRUE   The statement may be cached
    @retval FALSE  The statement must not be cached
*/
my_bool ha_myisam::register_query_cache_table(THD *thd, char *table_name,
uint table_name_len,
qc_engine_callback
*engine_callback,
ulonglong *engine_data)
{
DBUG_ENTER("ha_myisam::register_query_cache_table");
/*
No call back function is needed to determine if a cached statement
is valid or not.
*/
*engine_callback= 0;
/*
No engine data is needed.
*/
*engine_data= 0;
if (file->s->concurrent_insert)
{
    /*
      If a concurrent INSERT has happened just before the currently
      processed SELECT statement, the total size of the table is
      unknown.

      To determine whether the table size is known, the current thread's
      snapshot of the table size is compared with the actual table size.
      If the table size is unknown, the SELECT statement can't be cached.

      When concurrent inserts are disabled at table open, mi_open()
      does not assign a get_status() function. In that case the local
      ("current") status is never updated, and we would wrongly think
      that we cannot cache the statement.
    */
ulonglong actual_data_file_length;
ulonglong current_data_file_length;
    /*
      POSIX visibility rules specify that "2. Whatever memory values a
      thread can see when it unlocks a mutex <...> can also be seen by any
      thread that later locks the same mutex". In this particular case,
      the concurrent insert thread modified data_file_length in
      MYISAM_SHARE before it unlocked (or even locked)
      structure_guard_mutex. So here we are guaranteed to see at least that
      value after we have locked the same mutex. We may see a later value
      (modified by some other thread), but that is fine, as we only want to
      know whether the variable was changed; the actual new value does not
      matter.
    */
actual_data_file_length= file->s->state.state.data_file_length;
current_data_file_length= file->save_state.data_file_length;
if (current_data_file_length != actual_data_file_length)
{
/* Don't cache current statement. */
DBUG_RETURN(FALSE);
}
}
/*
This query execution might have started after the query cache was flushed
by a concurrent INSERT. In this case, don't cache this statement as the
data file length difference might not be visible yet if the tables haven't
been unlocked by the concurrent insert thread.
*/
if (file->state->uncacheable)
DBUG_RETURN(FALSE);
/* It is ok to try to cache current statement. */
DBUG_RETURN(TRUE);
}
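/*
  Illustrative sketch only (not code from this file): the query cache layer
  is expected to invoke this hook through handler::register_query_cache_table()
  before caching a SELECT result and to skip caching when FALSE is returned,
  roughly like this (cache_key/key_length are placeholder names):

    qc_engine_callback callback;
    ulonglong engine_data;
    if (!table->file->register_query_cache_table(thd, cache_key, key_length,
                                                 &callback, &engine_data))
      ;                                     // do not cache this statement
*/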
#endif /* HAVE_QUERY_CACHE */