/*
   Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA */
2005-05-26 12:09:14 +02:00
|
|
|
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation                          // gcc: Class implementation
#endif

#define MYSQL_SERVER 1
#include "sql_priv.h"
#include "probes_mysql.h"
#include "key.h"                                // key_copy
#include "sql_plugin.h"
#include <m_ctype.h>
#include <my_bit.h>
#include <myisampack.h>
#include "ha_myisam.h"
#include <stdarg.h>
#include "myisamdef.h"
#include "rt_index.h"
#include "sql_table.h"                          // tablename_to_filename
#include "sql_class.h"                          // THD
|
|
|
ulonglong myisam_recover_options;
|
|
|
|
static ulong opt_myisam_block_size;
|
2000-09-25 23:33:25 +02:00
|
|
|
|
2000-10-03 13:18:03 +02:00
|
|
|
/* bits in myisam_recover_options */
|
2000-09-25 23:33:25 +02:00
|
|
|
const char *myisam_recover_names[] =
|
2009-12-22 10:35:56 +01:00
|
|
|
{ "DEFAULT", "BACKUP", "FORCE", "QUICK", "OFF", NullS};
|
2001-09-27 20:45:48 +02:00
|
|
|
TYPELIB myisam_recover_typelib= {array_elements(myisam_recover_names)-1,"",
|
2004-10-25 14:51:26 +02:00
|
|
|
myisam_recover_names, NULL};
|
2000-09-25 23:33:25 +02:00
|
|
|
|
2005-09-23 23:39:50 +02:00
|
|
|
const char *myisam_stats_method_names[] = {"nulls_unequal", "nulls_equal",
|
2005-10-21 04:29:17 +02:00
|
|
|
"nulls_ignored", NullS};
|
2005-09-21 00:18:29 +02:00
|
|
|
TYPELIB myisam_stats_method_typelib= {
|
|
|
|
array_elements(myisam_stats_method_names) - 1, "",
|
|
|
|
myisam_stats_method_names, NULL};
|
|
|
|
|
2009-12-22 10:35:56 +01:00
|
|
|
static MYSQL_SYSVAR_ULONG(block_size, opt_myisam_block_size,
|
|
|
|
PLUGIN_VAR_NOSYSVAR | PLUGIN_VAR_RQCMDARG,
|
|
|
|
"Block size to be used for MyISAM index pages", NULL, NULL,
|
|
|
|
MI_KEY_BLOCK_LENGTH, MI_MIN_KEY_BLOCK_LENGTH, MI_MAX_KEY_BLOCK_LENGTH,
|
|
|
|
MI_MIN_KEY_BLOCK_LENGTH);
|
|
|
|
|
|
|
|
static MYSQL_SYSVAR_ULONG(data_pointer_size, myisam_data_pointer_size,
|
|
|
|
PLUGIN_VAR_RQCMDARG, "Default pointer size to be used for MyISAM tables",
|
|
|
|
NULL, NULL, 6, 2, 7, 1);
|
|
|
|
|
|
|
|
#define MB (1024*1024)
|
|
|
|
static MYSQL_SYSVAR_ULONGLONG(max_sort_file_size, myisam_max_temp_length,
|
|
|
|
PLUGIN_VAR_RQCMDARG, "Don't use the fast sort index method to created "
|
|
|
|
"index if the temporary file would get bigger than this", NULL, NULL,
|
|
|
|
LONG_MAX/MB*MB, 0, MAX_FILE_SIZE, MB);
|
|
|
|
|
|
|
|
static MYSQL_SYSVAR_SET(recover_options, myisam_recover_options,
|
|
|
|
PLUGIN_VAR_OPCMDARG|PLUGIN_VAR_READONLY,
|
|
|
|
"Syntax: myisam-recover-options[=option[,option...]], where option can be "
|
|
|
|
"DEFAULT, BACKUP, FORCE, QUICK, or OFF",
|
|
|
|
NULL, NULL, 0, &myisam_recover_typelib);
|
|
|
|
|
|
|
|
static MYSQL_THDVAR_ULONG(repair_threads, PLUGIN_VAR_RQCMDARG,
|
|
|
|
"If larger than 1, when repairing a MyISAM table all indexes will be "
|
|
|
|
"created in parallel, with one thread per index. The value of 1 "
|
|
|
|
"disables parallel repair", NULL, NULL,
|
|
|
|
1, 1, ULONG_MAX, 1);
|
|
|
|
|
|
|
|
static MYSQL_THDVAR_ULONG(sort_buffer_size, PLUGIN_VAR_RQCMDARG,
|
|
|
|
"The buffer that is allocated when sorting the index when doing "
|
|
|
|
"a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE", NULL, NULL,
|
2010-04-03 10:37:53 +02:00
|
|
|
8192*1024, (long) (MIN_SORT_BUFFER + MALLOC_OVERHEAD), ULONG_MAX, 1);
|
2009-12-22 10:35:56 +01:00
|
|
|
|
|
|
|
static MYSQL_SYSVAR_BOOL(use_mmap, opt_myisam_use_mmap, PLUGIN_VAR_NOCMDARG,
|
|
|
|
"Use memory mapping for reading and writing MyISAM tables", NULL, NULL, FALSE);
|
|
|
|
|
|
|
|
static MYSQL_SYSVAR_ULONGLONG(mmap_size, myisam_mmap_size,
|
|
|
|
PLUGIN_VAR_RQCMDARG|PLUGIN_VAR_READONLY, "Restricts the total memory "
|
|
|
|
"used for memory mapping of MySQL tables", NULL, NULL,
|
|
|
|
SIZE_T_MAX, MEMMAP_EXTRA_MARGIN, SIZE_T_MAX, 1);
|
|
|
|
|
|
|
|
static MYSQL_THDVAR_ENUM(stats_method, PLUGIN_VAR_RQCMDARG,
|
|
|
|
"Specifies how MyISAM index statistics collection code should "
|
|
|
|
"treat NULLs. Possible values of name are NULLS_UNEQUAL (default "
|
|
|
|
"behavior for 4.1 and later), NULLS_EQUAL (emulate 4.0 behavior), "
|
|
|
|
"and NULLS_IGNORED", NULL, NULL,
|
|
|
|
MI_STATS_METHOD_NULLS_NOT_EQUAL, &myisam_stats_method_typelib);
|
|
|
|
|
2009-04-16 13:32:56 +02:00
|
|
|
#ifndef DBUG_OFF
|
|
|
|
/**
|
|
|
|
Causes the thread to wait in a spin lock for a query kill signal.
|
|
|
|
This function is used by the test frame work to identify race conditions.
|
|
|
|
|
|
|
|
The signal is caught and ignored and the thread is not killed.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void debug_wait_for_kill(const char *info)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("debug_wait_for_kill");
|
|
|
|
const char *prev_info;
|
|
|
|
THD *thd;
|
|
|
|
thd= current_thd;
|
|
|
|
prev_info= thd_proc_info(thd, info);
|
|
|
|
while(!thd->killed)
|
|
|
|
my_sleep(1000);
|
|
|
|
DBUG_PRINT("info", ("Exit debug_wait_for_kill"));
|
|
|
|
thd_proc_info(thd, prev_info);
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
#endif
|
2000-08-15 19:09:37 +02:00
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/*****************************************************************************
|
|
|
|
** MyISAM tables
|
|
|
|
*****************************************************************************/
|
|
|
|
|
2006-09-30 02:19:02 +02:00
|
|
|
static handler *myisam_create_handler(handlerton *hton,
|
|
|
|
TABLE_SHARE *table,
|
|
|
|
MEM_ROOT *mem_root)
|
2005-11-07 16:25:06 +01:00
|
|
|
{
|
2006-09-30 02:19:02 +02:00
|
|
|
return new (mem_root) ha_myisam(hton, table);
|
2005-11-07 16:25:06 +01:00
|
|
|
}
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
// collect errors printed by mi_check routines
|
2002-06-11 10:20:31 +02:00
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
|
|
|
|
const char *fmt, va_list args)
|
|
|
|
{
|
|
|
|
THD* thd = (THD*)param->thd;
|
2002-12-11 08:17:51 +01:00
|
|
|
Protocol *protocol= thd->protocol;
|
2009-02-13 17:41:47 +01:00
|
|
|
size_t length, msg_length;
|
2000-07-31 21:29:14 +02:00
|
|
|
char msgbuf[MI_MAX_MSG_BUF];
|
2001-04-20 16:14:53 +02:00
|
|
|
char name[NAME_LEN*2+2];
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2002-12-11 08:17:51 +01:00
|
|
|
msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
|
2000-07-31 21:29:14 +02:00
|
|
|
msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia
|
|
|
|
|
2000-10-17 04:29:56 +02:00
|
|
|
DBUG_PRINT(msg_type,("message: %s",msgbuf));
|
|
|
|
|
2004-05-28 12:59:29 +02:00
|
|
|
if (!thd->vio_ok())
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2009-06-29 15:17:01 +02:00
|
|
|
sql_print_error("%s", msgbuf);
|
2000-07-31 21:29:14 +02:00
|
|
|
return;
|
|
|
|
}
|
2002-12-16 14:33:29 +01:00
|
|
|
|
2002-12-11 08:17:51 +01:00
|
|
|
if (param->testflag & (T_CREATE_MISSING_KEYS | T_SAFE_REPAIR |
|
|
|
|
T_AUTO_REPAIR))
|
2000-08-15 19:09:37 +02:00
|
|
|
{
|
|
|
|
my_message(ER_NOT_KEYFILE,msgbuf,MYF(MY_WME));
|
|
|
|
return;
|
|
|
|
}
|
2001-04-20 16:14:53 +02:00
|
|
|
length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) -
|
|
|
|
name);
|
2007-06-06 01:42:41 +02:00
|
|
|
/*
|
|
|
|
TODO: switch from protocol to push_warning here. The main reason we didn't
|
|
|
|
it yet is parallel repair. Due to following trace:
|
|
|
|
mi_check_print_msg/push_warning/sql_alloc/my_pthread_getspecific_ptr.
|
|
|
|
|
|
|
|
Also we likely need to lock mutex here (in both cases with protocol and
|
|
|
|
push_warning).
|
|
|
|
*/
|
2009-10-27 15:27:27 +01:00
|
|
|
if (param->need_print_msg_lock)
|
2009-12-05 02:26:15 +01:00
|
|
|
mysql_mutex_lock(¶m->print_msg_mutex);
|
2011-01-11 10:07:37 +01:00
|
|
|
|
2002-12-11 08:17:51 +01:00
|
|
|
protocol->prepare_for_resend();
|
2003-03-17 10:14:04 +01:00
|
|
|
protocol->store(name, length, system_charset_info);
|
|
|
|
protocol->store(param->op_name, system_charset_info);
|
|
|
|
protocol->store(msg_type, system_charset_info);
|
|
|
|
protocol->store(msgbuf, msg_length, system_charset_info);
|
2002-12-11 08:17:51 +01:00
|
|
|
if (protocol->write())
|
2002-01-02 20:29:41 +01:00
|
|
|
sql_print_error("Failed on my_net_write, writing to stderr instead: %s\n",
|
|
|
|
msgbuf);
|
2011-01-11 10:07:37 +01:00
|
|
|
|
2009-10-27 15:27:27 +01:00
|
|
|
if (param->need_print_msg_lock)
|
2009-12-05 02:26:15 +01:00
|
|
|
mysql_mutex_unlock(¶m->print_msg_mutex);
|
2011-01-11 10:07:37 +01:00
|
|
|
|
2000-08-17 17:30:36 +02:00
|
|
|
return;
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
2007-01-31 13:15:20 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
Convert TABLE object to MyISAM key and column definition
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
table2myisam()
|
|
|
|
table_arg in TABLE object.
|
|
|
|
keydef_out out MyISAM key definition.
|
|
|
|
recinfo_out out MyISAM column definition.
|
|
|
|
records_out out Number of fields.
|
|
|
|
|
|
|
|
DESCRIPTION
|
|
|
|
This function will allocate and initialize MyISAM key and column
|
|
|
|
definition for further use in mi_create or for a check for underlying
|
|
|
|
table conformance in merge engine.
|
|
|
|
|
Bug#26379 - Combination of FLUSH TABLE and REPAIR TABLE
corrupts a MERGE table
Bug 26867 - LOCK TABLES + REPAIR + merge table result in
memory/cpu hogging
Bug 26377 - Deadlock with MERGE and FLUSH TABLE
Bug 25038 - Waiting TRUNCATE
Bug 25700 - merge base tables get corrupted by
optimize/analyze/repair table
Bug 30275 - Merge tables: flush tables or unlock tables
causes server to crash
Bug 19627 - temporary merge table locking
Bug 27660 - Falcon: merge table possible
Bug 30273 - merge tables: Can't lock file (errno: 155)
The problems were:
Bug 26379 - Combination of FLUSH TABLE and REPAIR TABLE
corrupts a MERGE table
1. A thread trying to lock a MERGE table performs busy waiting while
REPAIR TABLE or a similar table administration task is ongoing on
one or more of its MyISAM tables.
2. A thread trying to lock a MERGE table performs busy waiting until all
threads that did REPAIR TABLE or similar table administration tasks
on one or more of its MyISAM tables in LOCK TABLES segments do UNLOCK
TABLES. The difference against problem #1 is that the busy waiting
takes place *after* the administration task. It is terminated by
UNLOCK TABLES only.
3. Two FLUSH TABLES within a LOCK TABLES segment can invalidate the
lock. This does *not* require a MERGE table. The first FLUSH TABLES
can be replaced by any statement that requires other threads to
reopen the table. In 5.0 and 5.1 a single FLUSH TABLES can provoke
the problem.
Bug 26867 - LOCK TABLES + REPAIR + merge table result in
memory/cpu hogging
Trying DML on a MERGE table, which has a child locked and
repaired by another thread, made an infinite loop in the server.
Bug 26377 - Deadlock with MERGE and FLUSH TABLE
Locking a MERGE table and its children in parent-child order
and flushing the child deadlocked the server.
Bug 25038 - Waiting TRUNCATE
Truncating a MERGE child, while the MERGE table was in use,
let the truncate fail instead of waiting for the table to
become free.
Bug 25700 - merge base tables get corrupted by
optimize/analyze/repair table
Repairing a child of an open MERGE table corrupted the child.
It was necessary to FLUSH the child first.
Bug 30275 - Merge tables: flush tables or unlock tables
causes server to crash
Flushing and optimizing locked MERGE children crashed the server.
Bug 19627 - temporary merge table locking
Use of a temporary MERGE table with non-temporary children
could corrupt the children.
Temporary tables are never locked. So we do now prohibit
non-temporary chidlren of a temporary MERGE table.
Bug 27660 - Falcon: merge table possible
It was possible to create a MERGE table with non-MyISAM children.
Bug 30273 - merge tables: Can't lock file (errno: 155)
This was a Windows-only bug. Table administration statements
sometimes failed with "Can't lock file (errno: 155)".
These bugs are fixed by a new implementation of MERGE table open.
When opening a MERGE table in open_tables() we do now add the
child tables to the list of tables to be opened by open_tables()
(the "query_list"). The children are not opened in the handler at
this stage.
After opening the parent, open_tables() opens each child from the
now extended query_list. When the last child is opened, we remove
the children from the query_list again and attach the children to
the parent. This behaves similar to the old open. However it does
not open the MyISAM tables directly, but grabs them from the already
open children.
When closing a MERGE table in close_thread_table() we detach the
children only. Closing of the children is done implicitly because
they are in thd->open_tables.
For more detail see the comment at the top of ha_myisammrg.cc.
Changed from open_ltable() to open_and_lock_tables() in all places
that can be relevant for MERGE tables. The latter can handle tables
added to the list on the fly. When open_ltable() was used in a loop
over a list of tables, the list must be temporarily terminated
after every table for open_and_lock_tables().
table_list->required_type is set to FRMTYPE_TABLE to avoid open of
special tables. Handling of derived tables is suppressed.
These details are handled by the new function
open_n_lock_single_table(), which has nearly the same signature as
open_ltable() and can replace it in most cases.
In reopen_tables() some of the tables open by a thread can be
closed and reopened. When a MERGE child is affected, the parent
must be closed and reopened too. Closing of the parent is forced
before the first child is closed. Reopen happens in the order of
thd->open_tables. MERGE parents do not attach their children
automatically at open. This is done after all tables are reopened.
So all children are open when attaching them.
Special lock handling like mysql_lock_abort() or mysql_lock_remove()
needs to be suppressed for MERGE children or forwarded to the parent.
This depends on the situation. In loops over all open tables one
suppresses child lock handling. When a single table is touched,
forwarding is done.
Behavioral changes:
===================
This patch changes the behavior of temporary MERGE tables.
Temporary MERGE must have temporary children.
The old behavior was wrong. A temporary table is not locked. Hence
even non-temporary children were not locked. See
Bug 19627 - temporary merge table locking.
You cannot change the union list of a non-temporary MERGE table
when LOCK TABLES is in effect. The following does *not* work:
CREATE TABLE m1 ... ENGINE=MRG_MYISAM ...;
LOCK TABLES t1 WRITE, t2 WRITE, m1 WRITE;
ALTER TABLE m1 ... UNION=(t1,t2) ...;
However, you can do this with a temporary MERGE table.
You cannot create a MERGE table with CREATE ... SELECT, neither
as a temporary MERGE table, nor as a non-temporary MERGE table.
CREATE TABLE m1 ... ENGINE=MRG_MYISAM ... SELECT ...;
Gives error message: table is not BASE TABLE.
2007-11-15 20:25:43 +01:00
|
|
|
The caller needs to free *recinfo_out after use. Since *recinfo_out
|
|
|
|
and *keydef_out are allocated with a my_multi_malloc, *keydef_out
|
|
|
|
is freed automatically when *recinfo_out is freed.
|
|
|
|
|
2007-01-31 13:15:20 +01:00
|
|
|
RETURN VALUE
|
|
|
|
0 OK
|
|
|
|
!0 error code
|
|
|
|
*/
|
|
|
|
|
|
|
|
int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out,
|
|
|
|
MI_COLUMNDEF **recinfo_out, uint *records_out)
|
|
|
|
{
|
|
|
|
uint i, j, recpos, minpos, fieldpos, temp_length, length;
|
|
|
|
enum ha_base_keytype type= HA_KEYTYPE_BINARY;
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
uchar *record;
|
2007-01-31 13:15:20 +01:00
|
|
|
KEY *pos;
|
|
|
|
MI_KEYDEF *keydef;
|
|
|
|
MI_COLUMNDEF *recinfo, *recinfo_pos;
|
|
|
|
HA_KEYSEG *keyseg;
|
2007-01-31 14:09:58 +01:00
|
|
|
TABLE_SHARE *share= table_arg->s;
|
|
|
|
uint options= share->db_options_in_use;
|
2007-01-31 13:15:20 +01:00
|
|
|
DBUG_ENTER("table2myisam");
|
|
|
|
if (!(my_multi_malloc(MYF(MY_WME),
|
2007-01-31 14:09:58 +01:00
|
|
|
recinfo_out, (share->fields * 2 + 2) * sizeof(MI_COLUMNDEF),
|
|
|
|
keydef_out, share->keys * sizeof(MI_KEYDEF),
|
2007-01-31 13:15:20 +01:00
|
|
|
&keyseg,
|
2007-01-31 14:09:58 +01:00
|
|
|
(share->key_parts + share->keys) * sizeof(HA_KEYSEG),
|
2007-01-31 13:15:20 +01:00
|
|
|
NullS)))
|
|
|
|
DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
|
|
|
|
keydef= *keydef_out;
|
|
|
|
recinfo= *recinfo_out;
|
|
|
|
pos= table_arg->key_info;
|
2007-01-31 14:09:58 +01:00
|
|
|
for (i= 0; i < share->keys; i++, pos++)
|
2007-01-31 13:15:20 +01:00
|
|
|
{
|
2007-11-10 18:39:30 +01:00
|
|
|
keydef[i].flag= ((uint16) pos->flags & (HA_NOSAME | HA_FULLTEXT | HA_SPATIAL));
|
2007-01-31 13:15:20 +01:00
|
|
|
keydef[i].key_alg= pos->algorithm == HA_KEY_ALG_UNDEF ?
|
|
|
|
(pos->flags & HA_SPATIAL ? HA_KEY_ALG_RTREE : HA_KEY_ALG_BTREE) :
|
|
|
|
pos->algorithm;
|
2007-01-31 15:57:54 +01:00
|
|
|
keydef[i].block_length= pos->block_size;
|
2007-01-31 13:15:20 +01:00
|
|
|
keydef[i].seg= keyseg;
|
|
|
|
keydef[i].keysegs= pos->key_parts;
|
|
|
|
for (j= 0; j < pos->key_parts; j++)
|
|
|
|
{
|
|
|
|
Field *field= pos->key_part[j].field;
|
|
|
|
type= field->key_type();
|
2007-01-31 14:09:58 +01:00
|
|
|
keydef[i].seg[j].flag= pos->key_part[j].key_part_flag;
|
2007-01-31 13:15:20 +01:00
|
|
|
|
|
|
|
if (options & HA_OPTION_PACK_KEYS ||
|
|
|
|
(pos->flags & (HA_PACK_KEY | HA_BINARY_PACK_KEY |
|
|
|
|
HA_SPACE_PACK_USED)))
|
|
|
|
{
|
|
|
|
if (pos->key_part[j].length > 8 &&
|
|
|
|
(type == HA_KEYTYPE_TEXT ||
|
|
|
|
type == HA_KEYTYPE_NUM ||
|
|
|
|
(type == HA_KEYTYPE_BINARY && !field->zero_pack())))
|
|
|
|
{
|
|
|
|
/* No blobs here */
|
|
|
|
if (j == 0)
|
|
|
|
keydef[i].flag|= HA_PACK_KEY;
|
|
|
|
if (!(field->flags & ZEROFILL_FLAG) &&
|
2007-01-31 14:09:58 +01:00
|
|
|
(field->type() == MYSQL_TYPE_STRING ||
|
|
|
|
field->type() == MYSQL_TYPE_VAR_STRING ||
|
2007-01-31 13:15:20 +01:00
|
|
|
((int) (pos->key_part[j].length - field->decimals())) >= 4))
|
|
|
|
keydef[i].seg[j].flag|= HA_SPACE_PACK;
|
|
|
|
}
|
|
|
|
else if (j == 0 && (!(pos->flags & HA_NOSAME) || pos->key_length > 16))
|
|
|
|
keydef[i].flag|= HA_BINARY_PACK_KEY;
|
|
|
|
}
|
|
|
|
keydef[i].seg[j].type= (int) type;
|
|
|
|
keydef[i].seg[j].start= pos->key_part[j].offset;
|
|
|
|
keydef[i].seg[j].length= pos->key_part[j].length;
|
2007-01-31 14:09:58 +01:00
|
|
|
keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end=
|
|
|
|
keydef[i].seg[j].bit_length= 0;
|
|
|
|
keydef[i].seg[j].bit_pos= 0;
|
2010-02-26 07:28:44 +01:00
|
|
|
keydef[i].seg[j].language= field->charset_for_protocol()->number;
|
2007-01-31 13:15:20 +01:00
|
|
|
|
|
|
|
if (field->null_ptr)
|
|
|
|
{
|
|
|
|
keydef[i].seg[j].null_bit= field->null_bit;
|
|
|
|
keydef[i].seg[j].null_pos= (uint) (field->null_ptr-
|
|
|
|
(uchar*) table_arg->record[0]);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
keydef[i].seg[j].null_bit= 0;
|
|
|
|
keydef[i].seg[j].null_pos= 0;
|
|
|
|
}
|
2007-01-31 15:57:54 +01:00
|
|
|
if (field->type() == MYSQL_TYPE_BLOB ||
|
|
|
|
field->type() == MYSQL_TYPE_GEOMETRY)
|
2007-01-31 13:15:20 +01:00
|
|
|
{
|
|
|
|
keydef[i].seg[j].flag|= HA_BLOB_PART;
|
|
|
|
/* save number of bytes used to pack length */
|
|
|
|
keydef[i].seg[j].bit_start= (uint) (field->pack_length() -
|
2007-01-31 14:09:58 +01:00
|
|
|
share->blob_ptr_size);
|
|
|
|
}
|
2007-01-31 15:57:54 +01:00
|
|
|
else if (field->type() == MYSQL_TYPE_BIT)
|
2007-01-31 14:09:58 +01:00
|
|
|
{
|
|
|
|
keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len;
|
|
|
|
keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs;
|
|
|
|
keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr -
|
|
|
|
(uchar*) table_arg->record[0]);
|
2007-01-31 13:15:20 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
keyseg+= pos->key_parts;
|
|
|
|
}
|
|
|
|
if (table_arg->found_next_number_field)
|
2007-01-31 14:09:58 +01:00
|
|
|
keydef[share->next_number_index].flag|= HA_AUTO_KEY;
|
2007-01-31 15:57:54 +01:00
|
|
|
record= table_arg->record[0];
|
2007-01-31 13:15:20 +01:00
|
|
|
recpos= 0;
|
|
|
|
recinfo_pos= recinfo;
|
2007-01-31 14:09:58 +01:00
|
|
|
while (recpos < (uint) share->reclength)
|
2007-01-31 13:15:20 +01:00
|
|
|
{
|
|
|
|
Field **field, *found= 0;
|
2007-01-31 14:09:58 +01:00
|
|
|
minpos= share->reclength;
|
2007-01-31 13:15:20 +01:00
|
|
|
length= 0;
|
|
|
|
|
|
|
|
for (field= table_arg->field; *field; field++)
|
|
|
|
{
|
2007-01-31 15:57:54 +01:00
|
|
|
if ((fieldpos= (*field)->offset(record)) >= recpos &&
|
2007-01-31 13:15:20 +01:00
|
|
|
fieldpos <= minpos)
|
|
|
|
{
|
|
|
|
/* skip null fields */
|
2007-01-31 14:09:58 +01:00
|
|
|
if (!(temp_length= (*field)->pack_length_in_rec()))
|
2007-01-31 13:15:20 +01:00
|
|
|
continue; /* Skip null-fields */
|
|
|
|
if (! found || fieldpos < minpos ||
|
|
|
|
(fieldpos == minpos && temp_length < length))
|
|
|
|
{
|
|
|
|
minpos= fieldpos;
|
|
|
|
found= *field;
|
|
|
|
length= temp_length;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2007-01-31 14:09:58 +01:00
|
|
|
DBUG_PRINT("loop", ("found: 0x%lx recpos: %d minpos: %d length: %d",
|
2007-01-31 13:15:20 +01:00
|
|
|
(long) found, recpos, minpos, length));
|
|
|
|
if (recpos != minpos)
|
|
|
|
{ // Reserved space (Null bits?)
|
|
|
|
bzero((char*) recinfo_pos, sizeof(*recinfo_pos));
|
|
|
|
recinfo_pos->type= (int) FIELD_NORMAL;
|
|
|
|
recinfo_pos++->length= (uint16) (minpos - recpos);
|
|
|
|
}
|
|
|
|
if (!found)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (found->flags & BLOB_FLAG)
|
|
|
|
recinfo_pos->type= (int) FIELD_BLOB;
|
2007-01-31 14:09:58 +01:00
|
|
|
else if (found->type() == MYSQL_TYPE_VARCHAR)
|
|
|
|
recinfo_pos->type= FIELD_VARCHAR;
|
2007-01-31 13:15:20 +01:00
|
|
|
else if (!(options & HA_OPTION_PACK_RECORD))
|
|
|
|
recinfo_pos->type= (int) FIELD_NORMAL;
|
|
|
|
else if (found->zero_pack())
|
|
|
|
recinfo_pos->type= (int) FIELD_SKIP_ZERO;
|
|
|
|
else
|
|
|
|
recinfo_pos->type= (int) ((length <= 3 ||
|
|
|
|
(found->flags & ZEROFILL_FLAG)) ?
|
|
|
|
FIELD_NORMAL :
|
2007-01-31 14:09:58 +01:00
|
|
|
found->type() == MYSQL_TYPE_STRING ||
|
|
|
|
found->type() == MYSQL_TYPE_VAR_STRING ?
|
2007-01-31 13:15:20 +01:00
|
|
|
FIELD_SKIP_ENDSPACE :
|
|
|
|
FIELD_SKIP_PRESPACE);
|
|
|
|
if (found->null_ptr)
|
|
|
|
{
|
|
|
|
recinfo_pos->null_bit= found->null_bit;
|
|
|
|
recinfo_pos->null_pos= (uint) (found->null_ptr -
|
|
|
|
(uchar*) table_arg->record[0]);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
recinfo_pos->null_bit= 0;
|
|
|
|
recinfo_pos->null_pos= 0;
|
|
|
|
}
|
|
|
|
(recinfo_pos++)->length= (uint16) length;
|
|
|
|
recpos= minpos + length;
|
|
|
|
DBUG_PRINT("loop", ("length: %d type: %d",
|
|
|
|
recinfo_pos[-1].length,recinfo_pos[-1].type));
|
|
|
|
}
|
|
|
|
*records_out= (uint) (recinfo_pos - recinfo);
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
Check for underlying table conformance
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
check_definition()
|
|
|
|
t1_keyinfo in First table key definition
|
|
|
|
t1_recinfo in First table record definition
|
|
|
|
t1_keys in Number of keys in first table
|
|
|
|
t1_recs in Number of records in first table
|
|
|
|
t2_keyinfo in Second table key definition
|
|
|
|
t2_recinfo in Second table record definition
|
|
|
|
t2_keys in Number of keys in second table
|
|
|
|
t2_recs in Number of records in second table
|
|
|
|
strict in Strict check switch
|
2009-04-08 08:55:19 +02:00
|
|
|
table in handle to the table object
|
2007-01-31 13:15:20 +01:00
|
|
|
|
|
|
|
DESCRIPTION
|
|
|
|
This function compares two MyISAM definitions. By intention it was done
|
|
|
|
to compare merge table definition against underlying table definition.
|
|
|
|
It may also be used to compare dot-frm and MYI definitions of MyISAM
|
|
|
|
table as well to compare different MyISAM table definitions.
|
|
|
|
|
|
|
|
For merge table it is not required that number of keys in merge table
|
|
|
|
must exactly match number of keys in underlying table. When calling this
|
|
|
|
function for underlying table conformance check, 'strict' flag must be
|
|
|
|
set to false, and converted merge definition must be passed as t1_*.
|
|
|
|
|
|
|
|
Otherwise 'strict' flag must be set to 1 and it is not required to pass
|
|
|
|
converted dot-frm definition as t1_*.
|
|
|
|
|
2009-04-08 08:55:19 +02:00
|
|
|
For compatibility reasons we relax some checks, specifically:
|
|
|
|
- 4.0 (and earlier versions) always set key_alg to 0.
|
|
|
|
- 4.0 (and earlier versions) have the same language for all keysegs.
|
|
|
|
|
2007-01-31 13:15:20 +01:00
|
|
|
RETURN VALUE
|
|
|
|
0 - Equal definitions.
|
|
|
|
1 - Different definitions.
|
2007-03-13 15:02:06 +01:00
|
|
|
|
|
|
|
TODO
|
|
|
|
- compare FULLTEXT keys;
|
|
|
|
- compare SPATIAL keys;
|
|
|
|
- compare FIELD_SKIP_ZERO which is converted to FIELD_NORMAL correctly
|
|
|
|
(should be corretly detected in table2myisam).
|
2007-01-31 13:15:20 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo,
                     uint t1_keys, uint t1_recs,
                     MI_KEYDEF *t2_keyinfo, MI_COLUMNDEF *t2_recinfo,
                     uint t2_keys, uint t2_recs, bool strict, TABLE *table_arg)
{
  uint i, j;
  DBUG_ENTER("check_definition");
  /*
    Tables created before the true-VARCHAR .frm format (4.0 and earlier)
    always stored key_alg as 0 and used one language for all keysegs, so
    those two checks are skipped for such tables (see function comment).
  */
  my_bool mysql_40_compat= table_arg && table_arg->s->frm_version < FRM_VER_TRUE_VARCHAR;
  /*
    In strict mode the key counts must match exactly; in merge-conformance
    mode (strict == false) the merge table (t1) may define fewer keys than
    the underlying table (t2).
  */
  if ((strict ? t1_keys != t2_keys : t1_keys > t2_keys))
  {
    DBUG_PRINT("error", ("Number of keys differs: t1_keys=%u, t2_keys=%u",
                         t1_keys, t2_keys));
    DBUG_RETURN(1);
  }
  /* The number of record (column) definitions must always match exactly. */
  if (t1_recs != t2_recs)
  {
    DBUG_PRINT("error", ("Number of recs differs: t1_recs=%u, t2_recs=%u",
                         t1_recs, t2_recs));
    DBUG_RETURN(1);
  }
  /* Compare each key definition and all of its key segments. */
  for (i= 0; i < t1_keys; i++)
  {
    HA_KEYSEG *t1_keysegs= t1_keyinfo[i].seg;
    HA_KEYSEG *t2_keysegs= t2_keyinfo[i].seg;
    /*
      FULLTEXT keys are not compared segment-by-segment (see TODO in the
      function comment); they only have to agree on being FULLTEXT.
    */
    if (t1_keyinfo[i].flag & HA_FULLTEXT && t2_keyinfo[i].flag & HA_FULLTEXT)
      continue;
    else if (t1_keyinfo[i].flag & HA_FULLTEXT ||
             t2_keyinfo[i].flag & HA_FULLTEXT)
    {
      DBUG_PRINT("error", ("Key %d has different definition", i));
      DBUG_PRINT("error", ("t1_fulltext= %d, t2_fulltext=%d",
                           test(t1_keyinfo[i].flag & HA_FULLTEXT),
                           test(t2_keyinfo[i].flag & HA_FULLTEXT)));
      DBUG_RETURN(1);
    }
    /* Same treatment for SPATIAL keys: presence must match, details skipped. */
    if (t1_keyinfo[i].flag & HA_SPATIAL && t2_keyinfo[i].flag & HA_SPATIAL)
      continue;
    else if (t1_keyinfo[i].flag & HA_SPATIAL ||
             t2_keyinfo[i].flag & HA_SPATIAL)
    {
      DBUG_PRINT("error", ("Key %d has different definition", i));
      DBUG_PRINT("error", ("t1_spatial= %d, t2_spatial=%d",
                           test(t1_keyinfo[i].flag & HA_SPATIAL),
                           test(t2_keyinfo[i].flag & HA_SPATIAL)));
      DBUG_RETURN(1);
    }
    /* key_alg is only compared for post-4.0 tables (see mysql_40_compat). */
    if ((!mysql_40_compat &&
        t1_keyinfo[i].key_alg != t2_keyinfo[i].key_alg) ||
        t1_keyinfo[i].keysegs != t2_keyinfo[i].keysegs)
    {
      DBUG_PRINT("error", ("Key %d has different definition", i));
      DBUG_PRINT("error", ("t1_keysegs=%d, t1_key_alg=%d",
                           t1_keyinfo[i].keysegs, t1_keyinfo[i].key_alg));
      DBUG_PRINT("error", ("t2_keysegs=%d, t2_key_alg=%d",
                           t2_keyinfo[i].keysegs, t2_keyinfo[i].key_alg));
      DBUG_RETURN(1);
    }
    for (j=  t1_keyinfo[i].keysegs; j--;)
    {
      uint8 t1_keysegs_j__type= t1_keysegs[j].type;

      /*
        Table migration from 4.1 to 5.1. In 5.1 a *TEXT key part is
        always HA_KEYTYPE_VARTEXT2. In 4.1 we had only the equivalent of
        HA_KEYTYPE_VARTEXT1. Since we treat both the same on MyISAM
        level, we can ignore a mismatch between these types.
      */
      if ((t1_keysegs[j].flag & HA_BLOB_PART) &&
          (t2_keysegs[j].flag & HA_BLOB_PART))
      {
        if ((t1_keysegs_j__type == HA_KEYTYPE_VARTEXT2) &&
            (t2_keysegs[j].type == HA_KEYTYPE_VARTEXT1))
          t1_keysegs_j__type= HA_KEYTYPE_VARTEXT1; /* purecov: tested */
        else if ((t1_keysegs_j__type == HA_KEYTYPE_VARBINARY2) &&
                 (t2_keysegs[j].type == HA_KEYTYPE_VARBINARY1))
          t1_keysegs_j__type= HA_KEYTYPE_VARBINARY1; /* purecov: inspected */
      }

      /* language is only compared for post-4.0 tables (see mysql_40_compat). */
      if ((!mysql_40_compat &&
          t1_keysegs[j].language != t2_keysegs[j].language) ||
          t1_keysegs_j__type != t2_keysegs[j].type ||
          t1_keysegs[j].null_bit != t2_keysegs[j].null_bit ||
          t1_keysegs[j].length != t2_keysegs[j].length)
      {
        DBUG_PRINT("error", ("Key segment %d (key %d) has different "
                             "definition", j, i));
        DBUG_PRINT("error", ("t1_type=%d, t1_language=%d, t1_null_bit=%d, "
                             "t1_length=%d",
                             t1_keysegs[j].type, t1_keysegs[j].language,
                             t1_keysegs[j].null_bit, t1_keysegs[j].length));
        DBUG_PRINT("error", ("t2_type=%d, t2_language=%d, t2_null_bit=%d, "
                             "t2_length=%d",
                             t2_keysegs[j].type, t2_keysegs[j].language,
                             t2_keysegs[j].null_bit, t2_keysegs[j].length));

        DBUG_RETURN(1);
      }
    }
  }
  /* Compare each record (column) definition. */
  for (i= 0; i < t1_recs; i++)
  {
    MI_COLUMNDEF *t1_rec= &t1_recinfo[i];
    MI_COLUMNDEF *t2_rec= &t2_recinfo[i];
    /*
      FIELD_SKIP_ZERO can be changed to FIELD_NORMAL in mi_create,
      see NOTE1 in mi_create.c
    */
    if ((t1_rec->type != t2_rec->type &&
         !(t1_rec->type == (int) FIELD_SKIP_ZERO &&
           t1_rec->length == 1 &&
           t2_rec->type == (int) FIELD_NORMAL)) ||
        t1_rec->length != t2_rec->length ||
        t1_rec->null_bit != t2_rec->null_bit)
    {
      DBUG_PRINT("error", ("Field %d has different definition", i));
      DBUG_PRINT("error", ("t1_type=%d, t1_length=%d, t1_null_bit=%d",
                           t1_rec->type, t1_rec->length, t1_rec->null_bit));
      DBUG_PRINT("error", ("t2_type=%d, t2_length=%d, t2_null_bit=%d",
                           t2_rec->type, t2_rec->length, t2_rec->null_bit));
      DBUG_RETURN(1);
    }
  }
  /* All keys and all record definitions matched: definitions are equal. */
  DBUG_RETURN(0);
}
|
|
|
|
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
extern "C" {
|
|
|
|
|
2004-07-12 06:43:38 +02:00
|
|
|
volatile int *killed_ptr(MI_CHECK *param)
{
  /*
    Expose the owning thread's kill flag to the low-level check code.
    In theory an unsafe conversion, but should be ok for now.
  */
  THD *thd= (THD *) param->thd;
  return (int *) &thd->killed;
}
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/* Record that a check/repair error occurred and forward it to the log. */
void mi_check_print_error(MI_CHECK *param, const char *fmt,...)
{
  va_list args;

  /* Remember that an error was printed and that data may have been lost. */
  param->error_printed|= 1;
  param->out_flag|= O_DATA_LOST;

  va_start(args, fmt);
  mi_check_print_msg(param, "error", fmt, args);
  va_end(args);
}
|
|
|
|
|
|
|
|
/* Forward an informational check/repair message to the log; sets no flags. */
void mi_check_print_info(MI_CHECK *param, const char *fmt,...)
{
  va_list ap;

  va_start(ap, fmt);
  mi_check_print_msg(param, "info", fmt, ap);
  va_end(ap);
}
|
|
|
|
|
|
|
|
/* Record that a check/repair warning occurred and forward it to the log. */
void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
{
  va_list ap;

  /* Remember that a warning was printed and that data may have been lost. */
  param->warning_printed= 1;
  param->out_flag|= O_DATA_LOST;

  va_start(ap, fmt);
  mi_check_print_msg(param, "warning", fmt, ap);
  va_end(ap);
}
|
|
|
|
|
2009-11-25 13:25:01 +01:00
|
|
|
|
|
|
|
/**
|
|
|
|
Report list of threads (and queries) accessing a table, thread_id of a
|
|
|
|
  thread that detected corruption, source file name and line number where
|
|
|
|
this corruption was detected, optional extra information (string).
|
|
|
|
|
|
|
|
This function is intended to be used when table corruption is detected.
|
|
|
|
|
|
|
|
@param[in] file MI_INFO object.
|
|
|
|
@param[in] message Optional error message.
|
|
|
|
@param[in] sfile Name of source file.
|
|
|
|
@param[in] sline Line number in source file.
|
|
|
|
|
|
|
|
@return void
|
|
|
|
*/
|
|
|
|
|
|
|
|
void _mi_report_crashed(MI_INFO *file, const char *message,
                        const char *sfile, uint sline)
{
  THD *cur_thd;
  LIST *element;
  char buf[1024];
  /* Protect the share's in_use list while we walk and report it. */
  mysql_mutex_lock(&file->s->intern_lock);
  /* Report the thread that detected the corruption, if known. */
  if ((cur_thd= (THD*) file->in_use.data))
    sql_print_error("Got an error from thread_id=%lu, %s:%d", cur_thd->thread_id,
                    sfile, sline);
  else
    sql_print_error("Got an error from unknown thread, %s:%d", sfile, sline);
  /* Optional extra information supplied by the caller. */
  if (message)
    sql_print_error("%s", message);
  /* List every thread (and its query context) currently using the table. */
  for (element= file->s->in_use; element; element= list_rest(element))
  {
    THD *thd= (THD*) element->data;
    sql_print_error("%s", thd ? thd_security_context(thd, buf, sizeof(buf), 0)
                          : "Unknown thread accessing table");
  }
  mysql_mutex_unlock(&file->s->intern_lock);
}
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
2005-07-19 20:21:12 +02:00
|
|
|
|
2006-09-30 02:19:02 +02:00
|
|
|
ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg)
|
|
|
|
:handler(hton, table_arg), file(0),
|
2005-07-19 20:21:12 +02:00
|
|
|
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
|
2007-05-28 12:50:29 +02:00
|
|
|
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the momement
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it neads in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplices some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to loose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparision with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this throughly).
Lars has promosed to do this.
2006-06-04 17:52:22 +02:00
|
|
|
HA_DUPLICATE_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
|
|
|
|
HA_FILE_BASED | HA_CAN_GEOMETRY | HA_NO_TRANSACTIONS |
|
|
|
|
HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS |
|
2011-03-08 09:41:57 +01:00
|
|
|
HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_REPAIR),
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
    but we don't have a primary key. This allows the handler to take precautions
    in remembering any hidden primary key, to be able to update/delete any
    found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
    has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
  - table->mark_columns_needed_for_delete(),
    table->mark_columns_needed_for_update() and
    table->mark_columns_needed_for_insert() to allow us to put additional
    columns in column usage maps if handler so requires.
    (The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
    (Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
    (thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
    when creating a primary key with NULL fields, even after the fields have been
    automatically converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
    do comparison with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
    someone else also tests this thoroughly).
    Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
can_enable_indexes(1)
|
2005-07-19 20:21:12 +02:00
|
|
|
{}
|
|
|
|
|
2011-03-25 12:36:02 +01:00
|
|
|
/*
  Create a clone of this handler for the same open table.

  The base class clone() opens a second handler instance on the same
  table; on success we also share the MyISAM state block so that both
  instances observe the same row count / key statistics.

  @param name      normalized path of the table
  @param mem_root  memory root to allocate the new handler on

  @return the cloned handler, or NULL on failure
*/
handler *ha_myisam::clone(const char *name, MEM_ROOT *mem_root)
{
  handler *base= handler::clone(name, mem_root);
  ha_myisam *cloned= static_cast<ha_myisam *>(base);
  if (cloned != NULL)
    cloned->file->state= file->state;   /* share live state with the original */
  return cloned;
}
|
|
|
|
|
2005-07-19 20:21:12 +02:00
|
|
|
|
2005-04-27 11:25:08 +02:00
|
|
|
/*
  File-name extensions used by a MyISAM table: the index file (.MYI)
  and the data file (.MYD). NullS-terminated; returned by bas_ext().
*/
static const char *ha_myisam_exts[] = {
  ".MYI",
  ".MYD",
  NullS
};
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/*
  Return the NullS-terminated list of file extensions (.MYI, .MYD) that
  make up a MyISAM table on disk. The server uses this e.g. when
  dropping or renaming the table files.
*/
const char **ha_myisam::bas_ext() const
{
  return ha_myisam_exts;
}
|
2000-07-31 21:29:14 +02:00
|
|
|
|
|
|
|
|
2002-01-02 20:29:41 +01:00
|
|
|
/*
  Return a human-readable name for the type of the given index,
  as shown in SHOW KEYS / INFORMATION_SCHEMA.

  @param key_number  index position in table->key_info[]

  @return "FULLTEXT", "SPATIAL", "RTREE" or "BTREE"
*/
const char *ha_myisam::index_type(uint key_number)
{
  KEY *key= &table->key_info[key_number];
  if (key->flags & HA_FULLTEXT)
    return "FULLTEXT";
  if (key->flags & HA_SPATIAL)
    return "SPATIAL";
  if (key->algorithm == HA_KEY_ALG_RTREE)
    return "RTREE";
  return "BTREE";
}
|
|
|
|
|
2006-01-19 03:56:06 +01:00
|
|
|
|
WL#3984 (Revise locking of mysql.general_log and mysql.slow_log)
Bug#25422 (Hang with log tables)
Bug 17876 (Truncating mysql.slow_log in a SP after using cursor locks the
thread)
Bug 23044 (Warnings on flush of a log table)
Bug 29129 (Resetting general_log while the GLOBAL READ LOCK is set causes
a deadlock)
Prior to this fix, the server would hang when performing concurrent
ALTER TABLE or TRUNCATE TABLE statements against the LOG TABLES,
which are mysql.general_log and mysql.slow_log.
The root cause traces to the following code:
in sql_base.cc, open_table()
if (table->in_use != thd)
{
/* wait_for_condition will unlock LOCK_open for us */
wait_for_condition(thd, &LOCK_open, &COND_refresh);
}
The problem with this code is that the current implementation of the
LOGGER creates 'fake' THD objects, like
- Log_to_csv_event_handler::general_log_thd
- Log_to_csv_event_handler::slow_log_thd
which are not associated to a real thread running in the server,
so that waiting for these non-existing threads to release table locks
cause the dead lock.
In general, the design of Log_to_csv_event_handler does not fit into the
general architecture of the server, so that the concept of general_log_thd
and slow_log_thd has to be abandoned:
- this implementation does not work with table locking
- it will not work with commands like SHOW PROCESSLIST
- having the log tables always opened does not integrate well with DDL
operations / FLUSH TABLES / SET GLOBAL READ_ONLY
With this patch, the fundamental design of the LOGGER has been changed to:
- always open and close a log table when writing a log
- remove totally the usage of fake THD objects
- clarify how locking of log tables is implemented in general.
See WL#3984 for details related to the new locking design.
Additional changes (misc bugs exposed and fixed):
1)
mysqldump which would ignore some tables in dump_all_tables_in_db(),
but forget to ignore the same in dump_all_views_in_db().
2)
mysqldump would also issue an empty "LOCK TABLE" command when all the tables
to lock are to be ignored (numrows == 0), instead of not issuing the query.
3)
Internal errors handlers could intercept errors but not warnings
(see sql_error.cc).
4)
Implementing a nested call to open tables, for the performance schema tables,
exposed an existing bug in remove_table_from_cache(), which would perform:
in_use->some_tables_deleted=1;
against another thread, without any consideration about thread locking.
This call inside remove_table_from_cache() was not required anyway,
since calling mysql_lock_abort() takes care of aborting -- cleanly -- threads
that might hold a lock on a table.
This line (in_use->some_tables_deleted=1) has been removed.
2007-07-27 08:31:06 +02:00
|
|
|
/*
  Open a MyISAM table and attach it to this handler instance.

  Name is here without an extension; the lower mi_open() layer appends
  the .MYI/.MYD suffixes itself.

  @param name            path of the table files, without extension
  @param mode            open mode, passed through to mi_open()
  @param test_if_locked  HA_OPEN_xxx flags controlling lock behaviour

  @return 0 on success, otherwise my_errno (or -1 when my_errno is unset)
*/
int ha_myisam::open(const char *name, int mode, uint test_if_locked)
{
  MI_KEYDEF *keyinfo;
  MI_COLUMNDEF *recinfo= 0;
  uint recs;
  uint i;

  /*
    If the user wants to have memory mapped data files, add an
    open_flag. Do not memory map temporary tables because they are
    expected to be inserted and thus extended a lot. Memory mapping is
    efficient for files that keep their size, but very inefficient for
    growing files. Using an open_flag instead of calling mi_extra(...
    HA_EXTRA_MMAP ...) after mi_open() has the advantage that the
    mapping is not repeated for every open, but just done on the initial
    open, when the MyISAM share is created. Everytime the server
    requires to open a new instance of a table it calls this method. We
    will always supply HA_OPEN_MMAP for a permanent table. However, the
    MyISAM storage engine will ignore this flag if this is a secondary
    open of a table that is in use by other threads already (if the
    MyISAM share exists already).
  */
  if (!(test_if_locked & HA_OPEN_TMP_TABLE) && opt_myisam_use_mmap)
    test_if_locked|= HA_OPEN_MMAP;

  if (!(file=mi_open(name, mode, test_if_locked | HA_OPEN_FROM_SQL_LAYER)))
    return (my_errno ? my_errno : -1);
  if (!table->s->tmp_table) /* No need to perform a check for tmp table */
  {
    /*
      Verify that the on-disk key/column definitions match the TABLE
      object the server expects; a mismatch means the .frm and the
      MyISAM files disagree and the table is treated as crashed.
    */
    if ((my_errno= table2myisam(table, &keyinfo, &recinfo, &recs)))
    {
      /* purecov: begin inspected */
      DBUG_PRINT("error", ("Failed to convert TABLE object to MyISAM "
                           "key and column definition"));
      goto err;
      /* purecov: end */
    }
    if (check_definition(keyinfo, recinfo, table->s->keys, recs,
                         file->s->keyinfo, file->s->rec,
                         file->s->base.keys, file->s->base.fields,
                         true, table))
    {
      /* purecov: begin inspected */
      my_errno= HA_ERR_CRASHED;
      goto err;
      /* purecov: end */
    }
  }

  if (test_if_locked & (HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_TMP_TABLE))
    (void) mi_extra(file, HA_EXTRA_NO_WAIT_LOCK, 0);

  /* Populate handler::stats from the newly opened file */
  info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
  if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED))
    (void) mi_extra(file, HA_EXTRA_WAIT_LOCK, 0);
  if (!table->s->db_record_offset)
    int_table_flags|=HA_REC_NOT_IN_SEQ;
  if (file->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD))
    int_table_flags|=HA_HAS_CHECKSUM;

  /* Install fulltext parser plugins and export key block sizes per index */
  for (i= 0; i < table->s->keys; i++)
  {
    plugin_ref parser= table->key_info[i].parser;
    if (table->key_info[i].flags & HA_USES_PARSER)
      file->s->keyinfo[i].parser=
        (struct st_mysql_ftparser *)plugin_decl(parser)->info;
    table->key_info[i].block_size= file->s->keyinfo[i].block_length;
  }
  my_errno= 0;
  goto end;
 err:
  this->close();
 end:
  /*
    Both recinfo and keydef are allocated by my_multi_malloc(), thus only
    recinfo must be freed.
  */
  if (recinfo)
    my_free(recinfo);
  return my_errno;
}
|
|
|
|
|
|
|
|
int ha_myisam::close(void)
|
|
|
|
{
|
|
|
|
MI_INFO *tmp=file;
|
|
|
|
file=0;
|
|
|
|
return mi_close(tmp);
|
|
|
|
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/*
  Insert a row into the MyISAM table.

  @param buf  row image in table->record[] format

  @return 0 on success, otherwise an error code from
          update_auto_increment() or mi_write()
*/
int ha_myisam::write_row(uchar *buf)
{
  ha_statistic_increment(&SSV::ha_write_count);

  /* If we have a timestamp column, update it to the current time */
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();

  /*
    If we have an auto_increment column and we are writing a changed row
    or a new row, then update the auto_increment value in the record.
  */
  if (table->next_number_field && buf == table->record[0])
  {
    int error= update_auto_increment();
    if (error)
      return error;
  }
  return mi_write(file, buf);
}
|
|
|
|
|
|
|
|
/*
  Check the table for errors (CHECK TABLE).

  @param thd        thread handle
  @param check_opt  CHECK TABLE options; check_opt->flags (T_xxx bits)
                    select the check depth (QUICK/FAST/MEDIUM/EXTENDED)

  @return HA_ADMIN_OK, HA_ADMIN_ALREADY_DONE, HA_ADMIN_CORRUPT or
          HA_ADMIN_INTERNAL_ERROR (when the table is not open)
*/
int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
{
  if (!file) return HA_ADMIN_INTERNAL_ERROR;
  int error;
  MI_CHECK param;
  MYISAM_SHARE* share = file->s;
  const char *old_proc_info=thd->proc_info;

  thd_proc_info(thd, "Checking table");
  myisamchk_init(&param);
  param.thd = thd;
  param.op_name = "check";
  param.db_name= table->s->db.str;
  param.table_name= table->alias;
  param.testflag = check_opt->flags | T_CHECK | T_SILENT;
  param.stats_method= (enum_mi_stats_method)THDVAR(thd, stats_method);

  /* Only update statistics when we are allowed to write to the table */
  if (!(table->db_stat & HA_READ_ONLY))
    param.testflag|= T_STATISTICS;
  param.using_global_keycache = 1;

  /*
    Short-circuit: for CHANGED / FAST checks we can skip the work
    entirely when the share state says nothing changed since the last
    successful check and no other instance holds the table open.
  */
  if (!mi_is_crashed(file) &&
      (((param.testflag & T_CHECK_ONLY_CHANGED) &&
        !(share->state.changed & (STATE_CHANGED | STATE_CRASHED |
                                  STATE_CRASHED_ON_REPAIR)) &&
        share->state.open_count == 0) ||
       ((param.testflag & T_FAST) && (share->state.open_count ==
                                      (uint) (share->global_changed ? 1 : 0)))))
    return HA_ADMIN_ALREADY_DONE;

  /* chk_status() result is deliberately overwritten by chk_size() below */
  error = chk_status(&param, file);		// Not fatal
  error = chk_size(&param, file);
  if (!error)
    error |= chk_del(&param, file, param.testflag);
  if (!error)
    error = chk_key(&param, file);
  if (!error)
  {
    /*
      Scan the data file too when a full check was requested, when the
      row format makes key-only checking insufficient (packed or
      compressed records), or when the table is marked crashed.
    */
    if ((!(param.testflag & T_QUICK) &&
         ((share->options &
           (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ||
          (param.testflag & (T_EXTEND | T_MEDIUM)))) ||
        mi_is_crashed(file))
    {
      uint old_testflag=param.testflag;
      param.testflag|=T_MEDIUM;
      if (!(error= init_io_cache(&param.read_cache, file->dfile,
                                 my_default_record_cache_size, READ_CACHE,
                                 share->pack.header_length, 1, MYF(MY_WME))))
      {
        error= chk_data_link(&param, file, param.testflag & T_EXTEND);
        end_io_cache(&(param.read_cache));
      }
      param.testflag= old_testflag;
    }
  }
  if (!error)
  {
    /*
      Check passed: clear the crashed/changed bits and persist updated
      state (time, open count, statistics) under the share mutex, then
      refresh the cached handler statistics.
    */
    if ((share->state.changed & (STATE_CHANGED |
                                 STATE_CRASHED_ON_REPAIR |
                                 STATE_CRASHED | STATE_NOT_ANALYZED)) ||
        (param.testflag & T_STATISTICS) ||
        mi_is_crashed(file))
    {
      file->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
      mysql_mutex_lock(&share->intern_lock);
      share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
                               STATE_CRASHED_ON_REPAIR);
      if (!(table->db_stat & HA_READ_ONLY))
        error=update_state_info(&param,file,UPDATE_TIME | UPDATE_OPEN_COUNT |
                                UPDATE_STAT);
      mysql_mutex_unlock(&share->intern_lock);
      info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
           HA_STATUS_CONST);
    }
  }
  else if (!mi_is_crashed(file) && !thd->killed)
  {
    /* Check failed and the table was not already flagged: mark it crashed */
    mi_mark_crashed(file);
    file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
  }

  thd_proc_info(thd, old_proc_info);
  return error ? HA_ADMIN_CORRUPT : HA_ADMIN_OK;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Analyze the key distribution in the table (ANALYZE TABLE).

  As the table may be only locked for read, we have to take into account
  that two threads may do an analyze at the same time! Updates to the
  shared state are therefore done under share->intern_lock.
*/

/*
  @param thd        thread handle
  @param check_opt  not consulted here; a fixed flag set (T_FAST | T_CHECK |
                    T_SILENT | T_STATISTICS | T_DONT_CHECK_CHECKSUM) is used

  @return HA_ADMIN_OK, HA_ADMIN_ALREADY_DONE or HA_ADMIN_CORRUPT
*/
int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt)
{
  int error=0;
  MI_CHECK param;
  MYISAM_SHARE* share = file->s;

  myisamchk_init(&param);
  param.thd = thd;
  param.op_name= "analyze";
  param.db_name= table->s->db.str;
  param.table_name= table->alias;
  param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
                   T_DONT_CHECK_CHECKSUM);
  param.using_global_keycache = 1;
  param.stats_method= (enum_mi_stats_method)THDVAR(thd, stats_method);

  /* Nothing to do if the statistics are already up to date */
  if (!(share->state.changed & STATE_NOT_ANALYZED))
    return HA_ADMIN_ALREADY_DONE;

  error = chk_key(&param, file);
  if (!error)
  {
    /* Persist the newly gathered key statistics under the share mutex */
    mysql_mutex_lock(&share->intern_lock);
    error=update_state_info(&param,file,UPDATE_STAT);
    mysql_mutex_unlock(&share->intern_lock);
  }
  else if (!mi_is_crashed(file) && !thd->killed)
    mi_mark_crashed(file);
  return error ? HA_ADMIN_CORRUPT : HA_ADMIN_OK;
}
|
|
|
|
|
2000-10-10 23:06:37 +02:00
|
|
|
|
2000-08-15 19:09:37 +02:00
|
|
|
/*
  Repair the table as requested by REPAIR TABLE.

  Builds an MI_CHECK parameter block from check_opt and delegates to
  repair(THD*, MI_CHECK&, bool). On failure with retry_repair set, the
  repair is retried with progressively less aggressive strategies:
  first without T_QUICK, then falling back from repair-by-sort to the
  keycache-based repair.
*/
int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt)
{
  int error;
  MI_CHECK param;
  ha_rows start_records;

  if (!file) return HA_ADMIN_INTERNAL_ERROR;

  myisamchk_init(&param);
  param.thd = thd;
  param.op_name= "repair";
  /* T_EXTEND selects the slower mi_repair() path instead of sort-based. */
  param.testflag= ((check_opt->flags & ~(T_EXTEND)) |
                   T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM |
                   (check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT));
  param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
  /* Remember the row count so we can report rows lost/found by the repair. */
  start_records=file->state->records;
  while ((error=repair(thd,param,0)) && param.retry_repair)
  {
    param.retry_repair=0;
    if (test_all_bits(param.testflag,
                      (uint) (T_RETRY_WITHOUT_QUICK | T_QUICK)))
    {
      /* Quick repair failed; retry touching the data file as well. */
      param.testflag&= ~T_RETRY_WITHOUT_QUICK;
      sql_print_information("Retrying repair of: '%s' without quick",
                            table->s->path.str);
      continue;
    }
    param.testflag&= ~T_QUICK;
    if ((param.testflag & T_REP_BY_SORT))
    {
      /* Sort-based repair failed; fall back to the keycache method. */
      param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP;
      sql_print_information("Retrying repair of: '%s' with keycache",
                            table->s->path.str);
      continue;
    }
    break;
  }
  if (!error && start_records != file->state->records &&
      !(check_opt->flags & T_VERY_SILENT))
  {
    char llbuff[22],llbuff2[22];
    sql_print_information("Found %s of %s rows when repairing '%s'",
                          llstr(file->state->records, llbuff),
                          llstr(start_records, llbuff2),
                          table->s->path.str);
  }
  return error;
}
|
|
|
|
|
|
|
|
/*
  Optimize the table: repair by sort, recollect statistics and sort the
  index file. Falls back to a non-sort repair once if the first attempt
  fails with retry_repair set.
*/
int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
{
  int error;
  if (!file) return HA_ADMIN_INTERNAL_ERROR;
  MI_CHECK param;

  myisamchk_init(&param);
  param.thd = thd;
  param.op_name= "optimize";
  param.testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE |
                   T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX);
  param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
  if ((error= repair(thd,param,1)) && param.retry_repair)
  {
    sql_print_warning("Warning: Optimize table got errno %d on %s.%s, retrying",
                      my_errno, param.db_name, param.table_name);
    /* Retry once without the sort-based repair strategy. */
    param.testflag&= ~T_REP_BY_SORT;
    error= repair(thd,param,1);
  }
  return error;
}
|
|
|
|
|
|
|
|
|
2006-12-14 23:51:37 +01:00
|
|
|
int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool do_optimize)
|
2000-08-15 19:09:37 +02:00
|
|
|
{
|
2000-09-12 02:02:33 +02:00
|
|
|
int error=0;
|
2001-11-18 13:08:17 +01:00
|
|
|
uint local_testflag=param.testflag;
|
2006-12-14 23:51:37 +01:00
|
|
|
bool optimize_done= !do_optimize, statistics_done=0;
|
2000-09-12 02:02:33 +02:00
|
|
|
const char *old_proc_info=thd->proc_info;
|
2001-06-05 02:38:10 +02:00
|
|
|
char fixed_name[FN_REFLEN];
|
2000-08-15 19:09:37 +02:00
|
|
|
MYISAM_SHARE* share = file->s;
|
2000-11-29 04:09:28 +01:00
|
|
|
ha_rows rows= file->state->records;
|
2000-10-17 04:29:56 +02:00
|
|
|
DBUG_ENTER("ha_myisam::repair");
|
2000-08-15 19:09:37 +02:00
|
|
|
|
2005-11-23 21:45:02 +01:00
|
|
|
param.db_name= table->s->db.str;
|
2005-01-06 12:00:13 +01:00
|
|
|
param.table_name= table->alias;
|
2000-07-31 21:29:14 +02:00
|
|
|
param.tmpfile_createflag = O_RDWR | O_TRUNC;
|
|
|
|
param.using_global_keycache = 1;
|
2005-01-06 12:00:13 +01:00
|
|
|
param.thd= thd;
|
|
|
|
param.tmpdir= &mysql_tmpdir_list;
|
|
|
|
param.out_flag= 0;
|
2001-06-05 02:38:10 +02:00
|
|
|
strmov(fixed_name,file->filename);
|
2000-09-12 02:02:33 +02:00
|
|
|
|
2008-12-04 16:03:02 +01:00
|
|
|
// Release latches since this can take a long time
|
|
|
|
ha_release_temporary_latches(thd);
|
|
|
|
|
2001-04-18 22:47:11 +02:00
|
|
|
// Don't lock tables if we have used LOCK TABLE
|
2009-12-01 15:39:03 +01:00
|
|
|
if (! thd->locked_tables_mode &&
|
2005-01-06 12:00:13 +01:00
|
|
|
mi_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK))
|
2000-10-23 14:35:42 +02:00
|
|
|
{
|
|
|
|
mi_check_print_error(¶m,ER(ER_CANT_LOCK),my_errno);
|
|
|
|
DBUG_RETURN(HA_ADMIN_FAILED);
|
|
|
|
}
|
|
|
|
|
2006-12-14 23:51:37 +01:00
|
|
|
if (!do_optimize ||
|
2000-09-20 03:54:10 +02:00
|
|
|
((file->state->del || share->state.split != file->state->records) &&
|
2002-03-13 18:20:17 +01:00
|
|
|
(!(param.testflag & T_QUICK) ||
|
2000-09-20 03:54:10 +02:00
|
|
|
!(share->state.changed & STATE_NOT_OPTIMIZED_KEYS))))
|
2000-09-12 02:02:33 +02:00
|
|
|
{
|
2002-08-31 22:42:41 +02:00
|
|
|
ulonglong key_map= ((local_testflag & T_CREATE_MISSING_KEYS) ?
|
2005-07-19 14:13:56 +02:00
|
|
|
mi_get_mask_all_keys_active(share->base.keys) :
|
2001-03-28 03:16:04 +02:00
|
|
|
share->state.key_map);
|
2001-11-18 13:08:17 +01:00
|
|
|
uint testflag=param.testflag;
|
2011-10-20 13:03:22 +02:00
|
|
|
#ifdef HAVE_MMAP
|
|
|
|
bool remap= test(share->file_map);
|
|
|
|
/*
|
|
|
|
mi_repair*() functions family use file I/O even if memory
|
|
|
|
mapping is available.
|
|
|
|
|
|
|
|
Since mixing mmap I/O and file I/O may cause various artifacts,
|
|
|
|
memory mapping must be disabled.
|
|
|
|
*/
|
|
|
|
if (remap)
|
|
|
|
mi_munmap_file(file);
|
|
|
|
#endif
|
2001-03-28 03:16:04 +02:00
|
|
|
if (mi_test_if_sort_rep(file,file->state->records,key_map,0) &&
|
2001-11-18 13:08:17 +01:00
|
|
|
(local_testflag & T_REP_BY_SORT))
|
2000-09-12 02:02:33 +02:00
|
|
|
{
|
2001-11-18 13:08:17 +01:00
|
|
|
local_testflag|= T_STATISTICS;
|
2000-09-12 02:02:33 +02:00
|
|
|
param.testflag|= T_STATISTICS; // We get this for free
|
2000-10-17 15:19:24 +02:00
|
|
|
statistics_done=1;
|
2009-12-22 10:35:56 +01:00
|
|
|
if (THDVAR(thd, repair_threads)>1)
|
2003-05-04 18:43:37 +02:00
|
|
|
{
|
|
|
|
char buf[40];
|
|
|
|
/* TODO: respect myisam_repair_threads variable */
|
|
|
|
my_snprintf(buf, 40, "Repair with %d threads", my_count_bits(key_map));
|
2007-02-22 16:03:08 +01:00
|
|
|
thd_proc_info(thd, buf);
|
2003-05-04 18:43:37 +02:00
|
|
|
error = mi_repair_parallel(¶m, file, fixed_name,
|
|
|
|
param.testflag & T_QUICK);
|
2007-02-22 16:03:08 +01:00
|
|
|
thd_proc_info(thd, "Repair done"); // to reset proc_info, as
|
2003-05-04 18:43:37 +02:00
|
|
|
// it was pointing to local buffer
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2007-02-22 16:03:08 +01:00
|
|
|
thd_proc_info(thd, "Repair by sorting");
|
2003-05-04 18:43:37 +02:00
|
|
|
error = mi_repair_by_sort(¶m, file, fixed_name,
|
|
|
|
param.testflag & T_QUICK);
|
|
|
|
}
|
2000-09-12 02:02:33 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2007-02-22 16:03:08 +01:00
|
|
|
thd_proc_info(thd, "Repair with keycache");
|
2000-10-17 15:19:24 +02:00
|
|
|
param.testflag &= ~T_REP_BY_SORT;
|
2002-03-13 18:20:17 +01:00
|
|
|
error= mi_repair(¶m, file, fixed_name,
|
2002-03-15 20:30:50 +01:00
|
|
|
param.testflag & T_QUICK);
|
2000-09-12 02:02:33 +02:00
|
|
|
}
|
2011-10-20 13:03:22 +02:00
|
|
|
#ifdef HAVE_MMAP
|
|
|
|
if (remap)
|
|
|
|
mi_dynmap_file(file, file->state->data_file_length);
|
|
|
|
#endif
|
2001-11-18 13:08:17 +01:00
|
|
|
param.testflag=testflag;
|
|
|
|
optimize_done=1;
|
2000-09-12 02:02:33 +02:00
|
|
|
}
|
|
|
|
if (!error)
|
|
|
|
{
|
2001-11-18 13:08:17 +01:00
|
|
|
if ((local_testflag & T_SORT_INDEX) &&
|
2000-09-12 02:02:33 +02:00
|
|
|
(share->state.changed & STATE_NOT_SORTED_PAGES))
|
|
|
|
{
|
|
|
|
optimize_done=1;
|
2007-02-22 16:03:08 +01:00
|
|
|
thd_proc_info(thd, "Sorting index");
|
2000-09-12 02:02:33 +02:00
|
|
|
error=mi_sort_index(¶m,file,fixed_name);
|
|
|
|
}
|
2001-11-18 13:08:17 +01:00
|
|
|
if (!statistics_done && (local_testflag & T_STATISTICS))
|
2000-09-12 02:02:33 +02:00
|
|
|
{
|
2001-11-18 13:08:17 +01:00
|
|
|
if (share->state.changed & STATE_NOT_ANALYZED)
|
|
|
|
{
|
|
|
|
optimize_done=1;
|
2007-02-22 16:03:08 +01:00
|
|
|
thd_proc_info(thd, "Analyzing");
|
2001-11-18 13:08:17 +01:00
|
|
|
error = chk_key(¶m, file);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
local_testflag&= ~T_STATISTICS; // Don't update statistics
|
2000-09-12 02:02:33 +02:00
|
|
|
}
|
|
|
|
}
|
2007-02-22 16:03:08 +01:00
|
|
|
thd_proc_info(thd, "Saving state");
|
2000-07-31 21:29:14 +02:00
|
|
|
if (!error)
|
2000-08-17 17:30:36 +02:00
|
|
|
{
|
2001-01-16 14:02:25 +01:00
|
|
|
if ((share->state.changed & STATE_CHANGED) || mi_is_crashed(file))
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2000-08-18 11:48:00 +02:00
|
|
|
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
|
|
|
|
STATE_CRASHED_ON_REPAIR);
|
2000-07-31 21:29:14 +02:00
|
|
|
file->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
|
|
|
|
}
|
2002-11-14 22:39:46 +01:00
|
|
|
/*
|
|
|
|
the following 'if', thought conceptually wrong,
|
|
|
|
is a useful optimization nevertheless.
|
|
|
|
*/
|
2002-12-12 20:01:32 +01:00
|
|
|
if (file->state != &file->s->state.state)
|
2002-11-14 22:39:46 +01:00
|
|
|
file->s->state.state = *file->state;
|
2000-07-31 21:29:14 +02:00
|
|
|
if (file->s->base.auto_key)
|
|
|
|
update_auto_increment_key(¶m, file, 1);
|
2001-11-18 13:08:17 +01:00
|
|
|
if (optimize_done)
|
|
|
|
error = update_state_info(¶m, file,
|
|
|
|
UPDATE_TIME | UPDATE_OPEN_COUNT |
|
|
|
|
(local_testflag &
|
|
|
|
T_STATISTICS ? UPDATE_STAT : 0));
|
2000-08-17 00:05:02 +02:00
|
|
|
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
|
|
|
|
HA_STATUS_CONST);
|
2000-12-12 03:34:56 +01:00
|
|
|
if (rows != file->state->records && ! (param.testflag & T_VERY_SILENT))
|
2000-11-29 04:09:28 +01:00
|
|
|
{
|
|
|
|
char llbuff[22],llbuff2[22];
|
|
|
|
mi_check_print_warning(¶m,"Number of rows changed from %s to %s",
|
|
|
|
llstr(rows,llbuff),
|
|
|
|
llstr(file->state->records,llbuff2));
|
|
|
|
}
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
2000-10-23 14:35:42 +02:00
|
|
|
else
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2000-10-23 14:35:42 +02:00
|
|
|
mi_mark_crashed_on_repair(file);
|
2000-07-31 21:29:14 +02:00
|
|
|
file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
|
2000-10-23 14:35:42 +02:00
|
|
|
update_state_info(¶m, file, 0);
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
2007-02-22 16:03:08 +01:00
|
|
|
thd_proc_info(thd, old_proc_info);
|
2009-12-01 15:39:03 +01:00
|
|
|
if (! thd->locked_tables_mode)
|
2001-04-18 22:47:11 +02:00
|
|
|
mi_lock_database(file,F_UNLCK);
|
2000-10-17 04:29:56 +02:00
|
|
|
DBUG_RETURN(error ? HA_ADMIN_FAILED :
|
|
|
|
!optimize_done ? HA_ADMIN_ALREADY_DONE : HA_ADMIN_OK);
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2003-07-16 21:30:49 +02:00
|
|
|
/*
|
2003-11-18 12:47:27 +01:00
|
|
|
Assign table indexes to a specific key cache.
|
2003-07-16 21:30:49 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
|
|
|
|
{
|
2003-11-20 21:06:25 +01:00
|
|
|
KEY_CACHE *new_key_cache= check_opt->key_cache;
|
2003-11-18 12:47:27 +01:00
|
|
|
const char *errmsg= 0;
|
2003-08-02 11:43:18 +02:00
|
|
|
int error= HA_ADMIN_OK;
|
2007-03-05 18:08:41 +01:00
|
|
|
ulonglong map;
|
2003-07-16 21:30:49 +02:00
|
|
|
TABLE_LIST *table_list= table->pos_in_table_list;
|
|
|
|
DBUG_ENTER("ha_myisam::assign_to_keycache");
|
|
|
|
|
2007-03-05 18:08:41 +01:00
|
|
|
table->keys_in_use_for_query.clear_all();
|
|
|
|
|
|
|
|
if (table_list->process_index_hints(table))
|
2007-06-08 08:20:50 +02:00
|
|
|
DBUG_RETURN(HA_ADMIN_FAILED);
|
2007-03-05 18:08:41 +01:00
|
|
|
map= ~(ulonglong) 0;
|
|
|
|
if (!table->keys_in_use_for_query.is_clear_all())
|
|
|
|
/* use all keys if there's no list specified by the user through hints */
|
|
|
|
map= table->keys_in_use_for_query.to_ulonglong();
|
2003-08-02 11:43:18 +02:00
|
|
|
|
2003-11-18 12:47:27 +01:00
|
|
|
if ((error= mi_assign_to_key_cache(file, map, new_key_cache)))
|
2003-08-02 11:43:18 +02:00
|
|
|
{
|
2005-02-08 23:50:45 +01:00
|
|
|
char buf[STRING_BUFFER_USUAL_SIZE];
|
2003-11-21 00:53:01 +01:00
|
|
|
my_snprintf(buf, sizeof(buf),
|
|
|
|
"Failed to flush to index file (errno: %d)", error);
|
|
|
|
errmsg= buf;
|
2003-08-02 11:43:18 +02:00
|
|
|
error= HA_ADMIN_CORRUPT;
|
2003-07-16 21:30:49 +02:00
|
|
|
}
|
2003-08-02 11:43:18 +02:00
|
|
|
|
2003-11-18 12:47:27 +01:00
|
|
|
if (error != HA_ADMIN_OK)
|
2003-07-16 21:30:49 +02:00
|
|
|
{
|
2003-11-18 12:47:27 +01:00
|
|
|
/* Send error to user */
|
2003-07-16 21:30:49 +02:00
|
|
|
MI_CHECK param;
|
|
|
|
myisamchk_init(¶m);
|
|
|
|
param.thd= thd;
|
2005-01-06 12:00:13 +01:00
|
|
|
param.op_name= "assign_to_keycache";
|
2005-11-23 21:45:02 +01:00
|
|
|
param.db_name= table->s->db.str;
|
|
|
|
param.table_name= table->s->table_name.str;
|
2003-07-16 21:30:49 +02:00
|
|
|
param.testflag= 0;
|
|
|
|
mi_check_print_error(¶m, errmsg);
|
|
|
|
}
|
2003-08-02 11:43:18 +02:00
|
|
|
DBUG_RETURN(error);
|
2003-07-16 21:30:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2003-06-12 13:29:02 +02:00
|
|
|
/*
  Preload pages of the index file for a table into the key cache.
*/

int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
{
  int error;
  const char *errmsg;
  ulonglong map;
  TABLE_LIST *table_list= table->pos_in_table_list;
  my_bool ignore_leaves= table_list->ignore_leaves;
  /* buf is at function scope so errmsg can safely point at it below. */
  char buf[MYSQL_ERRMSG_SIZE];

  DBUG_ENTER("ha_myisam::preload_keys");

  table->keys_in_use_for_query.clear_all();

  if (table_list->process_index_hints(table))
    DBUG_RETURN(HA_ADMIN_FAILED);

  map= ~(ulonglong) 0;
  /* Check validity of the index references */
  if (!table->keys_in_use_for_query.is_clear_all())
    /* use all keys if there's no list specified by the user through hints */
    map= table->keys_in_use_for_query.to_ulonglong();

  mi_extra(file, HA_EXTRA_PRELOAD_BUFFER_SIZE,
           (void *) &thd->variables.preload_buff_size);

  if ((error= mi_preload(file, map, ignore_leaves)))
  {
    /* Translate the storage-engine error to a user-visible message. */
    switch (error) {
    case HA_ERR_NON_UNIQUE_BLOCK_SIZE:
      errmsg= "Indexes use different block sizes";
      break;
    case HA_ERR_OUT_OF_MEM:
      errmsg= "Failed to allocate buffer";
      break;
    default:
      my_snprintf(buf, sizeof(buf),
                  "Failed to read from index file (errno: %d)", my_errno);
      errmsg= buf;
    }
    error= HA_ADMIN_FAILED;
    goto err;
  }

  DBUG_RETURN(HA_ADMIN_OK);

err:
  /* Report the failure to the client before returning. */
  {
    MI_CHECK param;
    myisamchk_init(&param);
    param.thd= thd;
    param.op_name= "preload_keys";
    param.db_name= table->s->db.str;
    param.table_name= table->s->table_name.str;
    param.testflag= 0;
    mi_check_print_error(&param, errmsg);
    DBUG_RETURN(error);
  }
}
|
|
|
|
|
2004-05-06 15:53:01 +02:00
|
|
|
|
2002-09-18 20:04:49 +02:00
|
|
|
/*
|
2004-05-06 15:53:01 +02:00
|
|
|
Disable indexes, making it persistent if requested.
|
|
|
|
|
2004-04-06 21:35:26 +02:00
|
|
|
SYNOPSIS
|
2004-05-06 15:53:01 +02:00
|
|
|
disable_indexes()
|
|
|
|
mode mode of operation:
|
|
|
|
HA_KEY_SWITCH_NONUNIQ disable all non-unique keys
|
|
|
|
HA_KEY_SWITCH_ALL disable all keys
|
|
|
|
HA_KEY_SWITCH_NONUNIQ_SAVE dis. non-uni. and make persistent
|
|
|
|
HA_KEY_SWITCH_ALL_SAVE dis. all keys and make persistent
|
|
|
|
|
|
|
|
IMPLEMENTATION
|
|
|
|
HA_KEY_SWITCH_NONUNIQ is not implemented.
|
|
|
|
HA_KEY_SWITCH_ALL_SAVE is not implemented.
|
|
|
|
|
|
|
|
RETURN
|
|
|
|
0 ok
|
|
|
|
HA_ERR_WRONG_COMMAND mode not implemented.
|
2004-04-06 21:35:26 +02:00
|
|
|
*/
|
2004-05-06 15:53:01 +02:00
|
|
|
|
|
|
|
int ha_myisam::disable_indexes(uint mode)
|
2004-04-06 21:35:26 +02:00
|
|
|
{
|
2004-05-06 15:53:01 +02:00
|
|
|
int error;
|
|
|
|
|
|
|
|
if (mode == HA_KEY_SWITCH_ALL)
|
|
|
|
{
|
|
|
|
/* call a storage engine function to switch the key map */
|
|
|
|
error= mi_disable_indexes(file);
|
|
|
|
}
|
|
|
|
else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE)
|
|
|
|
{
|
|
|
|
mi_extra(file, HA_EXTRA_NO_KEYS, 0);
|
|
|
|
info(HA_STATUS_CONST); // Read new key info
|
|
|
|
error= 0;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* mode not implemented */
|
|
|
|
error= HA_ERR_WRONG_COMMAND;
|
|
|
|
}
|
|
|
|
return error;
|
2004-04-06 21:35:26 +02:00
|
|
|
}
|
|
|
|
|
2004-05-06 15:53:01 +02:00
|
|
|
|
|
|
|
/*
  Enable indexes, making it persistent if requested.

  SYNOPSIS
    enable_indexes()
    mode        mode of operation:
                HA_KEY_SWITCH_NONUNIQ      enable all non-unique keys
                HA_KEY_SWITCH_ALL          enable all keys
                HA_KEY_SWITCH_NONUNIQ_SAVE en. non-uni. and make persistent
                HA_KEY_SWITCH_ALL_SAVE     en. all keys and make persistent

  DESCRIPTION
    Enable indexes, which might have been disabled by disable_index() before.
    The modes without _SAVE work only if both data and indexes are empty,
    since the MyISAM repair would enable them persistently.
    To be sure in these cases, call handler::delete_all_rows() before.

  IMPLEMENTATION
    HA_KEY_SWITCH_NONUNIQ is not implemented.
    HA_KEY_SWITCH_ALL_SAVE is not implemented.

  RETURN
    0  ok
    !=0  Error, among others:
    HA_ERR_CRASHED  data or index is non-empty. Delete all rows and retry.
    HA_ERR_WRONG_COMMAND  mode not implemented.
*/

int ha_myisam::enable_indexes(uint mode)
{
  int error;

  DBUG_EXECUTE_IF("wait_in_enable_indexes",
                  debug_wait_for_kill("wait_in_enable_indexes"); );

  if (mi_is_all_keys_active(file->s->state.key_map, file->s->base.keys))
  {
    /* All indexes are enabled already. */
    return 0;
  }

  if (mode == HA_KEY_SWITCH_ALL)
  {
    error= mi_enable_indexes(file);
    /*
       Do not try to repair on error,
       as this could make the enabled state persistent,
       but mode==HA_KEY_SWITCH_ALL forbids it.
    */
  }
  else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE)
  {
    /* Rebuild the missing (non-unique) keys via the repair machinery. */
    THD *thd=current_thd;
    MI_CHECK param;
    const char *save_proc_info=thd->proc_info;
    thd_proc_info(thd, "Creating index");
    myisamchk_init(&param);
    param.op_name= "recreating_index";
    param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
                     T_CREATE_MISSING_KEYS);
    param.myf_rw&= ~MY_WAIT_IF_FULL;
    param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
    param.stats_method= (enum_mi_stats_method)THDVAR(thd, stats_method);
    param.tmpdir=&mysql_tmpdir_list;
    if ((error= (repair(thd,param,0) != HA_ADMIN_OK)) && param.retry_repair)
    {
      sql_print_warning("Warning: Enabling keys got errno %d on %s.%s, retrying",
                        my_errno, param.db_name, param.table_name);
      /*
        Repairing by sort failed. Now try standard repair method.
        Still we want to fix only index file. If data file corruption
        was detected (T_RETRY_WITHOUT_QUICK), we shouldn't do much here.
        Let implicit repair do this job.
      */
      if (!(param.testflag & T_RETRY_WITHOUT_QUICK))
      {
        param.testflag&= ~T_REP_BY_SORT;
        error= (repair(thd,param,0) != HA_ADMIN_OK);
      }
      /*
        If the standard repair succeeded, clear all error messages which
        might have been set by the first repair. They can still be seen
        with SHOW WARNINGS then.
      */
      if (! error)
        thd->clear_error();
    }
    info(HA_STATUS_CONST);
    thd_proc_info(thd, save_proc_info);
  }
  else
  {
    /* mode not implemented */
    error= HA_ERR_WRONG_COMMAND;
  }
  return error;
}
|
|
|
|
|
2004-05-06 15:53:01 +02:00
|
|
|
|
|
|
|
/*
  Test if indexes are disabled.


  SYNOPSIS
    indexes_are_disabled()
      no parameters


  RETURN
    0  indexes are not disabled
    1  all indexes are disabled
   [2  non-unique indexes are disabled - NOT YET IMPLEMENTED]
*/

int ha_myisam::indexes_are_disabled(void)
{
  /* Delegate to the storage-engine-level key-map check. */
  return mi_indexes_are_disabled(file);
}
|
|
|
|
|
|
|
|
|
2002-09-18 20:04:49 +02:00
|
|
|
/*
  prepare for a many-rows insert operation
  e.g. - disable indexes (if they can be recreated fast) or
  activate special bulk-insert optimizations

  SYNOPSIS
    start_bulk_insert(rows)
    rows        Rows to be inserted
                0 if we don't know

  NOTICE
    Do not forget to call end_bulk_insert() later!
*/

void ha_myisam::start_bulk_insert(ha_rows rows)
{
  DBUG_ENTER("ha_myisam::start_bulk_insert");
  THD *thd= current_thd;
  /* Size the write cache from the estimated total insert volume. */
  ulong size= min(thd->variables.read_buff_size,
                  (ulong) (table->s->avg_row_length*rows));
  DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
                     (ulong) rows, size));

  /* don't enable row cache if too few rows */
  if (! rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE))
    mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size);

  /* Remember whether end_bulk_insert() may re-enable keys afterwards. */
  can_enable_indexes= mi_is_all_keys_active(file->s->state.key_map,
                                            file->s->base.keys);

  if (!(specialflag & SPECIAL_SAFE_MODE))
  {
    /*
      Only disable old index if the table was empty and we are inserting
      a lot of rows.
      Note that in end_bulk_insert() we may truncate the table if
      enable_indexes() failed, thus it's essential that indexes are
      disabled ONLY for an empty table.
    */
    if (file->state->records == 0 && can_enable_indexes &&
        (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES))
      mi_disable_non_unique_index(file,rows);
    else
    if (!file->bulk_insert &&
        (!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT))
    {
      mi_init_bulk_insert(file, thd->variables.bulk_insert_buff_size, rows);
    }
  }
  DBUG_VOID_RETURN;
}
|
|
|
|
|
2004-08-09 11:02:09 +02:00
|
|
|
/*
  end special bulk-insert optimizations,
  which have been activated by start_bulk_insert().

  SYNOPSIS
    end_bulk_insert()
    no arguments

  RETURN
    0     OK
    != 0  Error
*/

int ha_myisam::end_bulk_insert()
{
  mi_end_bulk_insert(file);
  int err=mi_extra(file, HA_EXTRA_NO_CACHE, 0);
  if (!err)
  {
    if (can_enable_indexes)
    {
      /*
        Truncate the table when enable index operation is killed.
        After truncating the table we don't need to enable the
        indexes, because the last repair operation is aborted after
        setting the indexes as active and trying to recreate them.
      */

      if (((err= enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE)) != 0) &&
          current_thd->killed)
      {
        delete_all_rows();
        /* not crashed, despite being killed during repair */
        file->s->state.changed&= ~(STATE_CRASHED|STATE_CRASHED_ON_REPAIR);
      }
    }
  }
  return err;
}
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2000-10-03 13:18:03 +02:00
|
|
|
|
2000-10-10 23:06:37 +02:00
|
|
|
/*
  Check the table and, if it is marked crashed or the check fails,
  auto-repair it according to myisam_recover_options.

  @return 0 on success (or successful recovery), 1 if the repair failed.
*/
bool ha_myisam::check_and_repair(THD *thd)
{
  int error=0;
  int marked_crashed;
  HA_CHECK_OPT check_opt;
  DBUG_ENTER("ha_myisam::check_and_repair");

  check_opt.init();
  check_opt.flags= T_MEDIUM | T_AUTO_REPAIR;
  // Don't use quick if deleted rows
  if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK))
    check_opt.flags|=T_QUICK;
  sql_print_warning("Checking table:   '%s'",table->s->path.str);

  /*
    Temporarily show the table name as the "query" (e.g. in the
    processlist) while the check/repair runs; restored below.
  */
  const CSET_STRING query_backup= thd->query_string;
  thd->set_query(table->s->table_name.str,
                 (uint) table->s->table_name.length, system_charset_info);

  if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt))
  {
    sql_print_warning("Recovering table: '%s'",table->s->path.str);
    check_opt.flags=
      ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) |
       (marked_crashed                             ? 0 : T_QUICK) |
       (myisam_recover_options & HA_RECOVER_FORCE  ? 0 : T_SAFE_REPAIR) |
       T_AUTO_REPAIR);
    if (repair(thd, &check_opt))
      error=1;
  }
  thd->set_query(query_backup);
  DBUG_RETURN(error);
}
|
|
|
|
|
2000-10-10 23:06:37 +02:00
|
|
|
bool ha_myisam::is_crashed() const
|
|
|
|
{
|
|
|
|
return (file->s->state.changed & STATE_CRASHED ||
|
|
|
|
(my_disable_locking && file->s->state.open_count));
|
|
|
|
}
|
2000-10-03 13:18:03 +02:00
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/*
  Update an existing row in the MyISAM table.

  @param old_data  record image of the row as it was read (previous values)
  @param new_data  record image with the new column values

  If the table has a TIMESTAMP column with ON UPDATE semantics, its value
  is refreshed here before the storage-engine call.

  @return 0 on success, or a MyISAM error code from mi_update()
*/
int ha_myisam::update_row(const uchar *old_data, uchar *new_data)
{
  ha_statistic_increment(&SSV::ha_update_count);
  /* Maintain TIMESTAMP ... ON UPDATE CURRENT_TIMESTAMP semantics */
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
    table->timestamp_field->set_time();
  return mi_update(file,old_data,new_data);
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions were done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/*
  Delete a row from the MyISAM table.

  @param buf  record image of the row most recently read (the row to delete)

  @return 0 on success, or a MyISAM error code from mi_delete()
*/
int ha_myisam::delete_row(const uchar *buf)
{
  ha_statistic_increment(&SSV::ha_delete_count);
  return mi_delete(file,buf);
}
|
|
|
|
|
2007-08-13 15:11:25 +02:00
|
|
|
/*
  Read a row via the currently active index using a search key.

  @param buf          OUT: buffer that receives the found record
  @param key          packed key value to search for
  @param keypart_map  bitmap of which key parts of 'key' are present
  @param find_flag    search mode (HA_READ_KEY_EXACT, HA_READ_AFTER_KEY, ...)

  Requires that the handler is in INDEX scan mode (index_init() was called).
  table->status is set to STATUS_NOT_FOUND on any error so upper layers can
  detect a missed lookup.

  @return 0 on success, or a MyISAM error code from mi_rkey()
*/
int ha_myisam::index_read_map(uchar *buf, const uchar *key,
                              key_part_map keypart_map,
                              enum ha_rkey_function find_flag)
{
  /* DTrace/SystemTap probe delimiting the index-read operation */
  MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_key_count);
  int error=mi_rkey(file, buf, active_index, key, keypart_map, find_flag);
  table->status=error ? STATUS_NOT_FOUND: 0;
  MYSQL_INDEX_READ_ROW_DONE(error);
  return error;
}
|
|
|
|
|
2007-08-13 15:11:25 +02:00
|
|
|
/*
  Read a row via an explicitly given index (instead of active_index) using
  a search key.

  @param buf          OUT: buffer that receives the found record
  @param index        ordinal number of the index to search
  @param key          packed key value to search for
  @param keypart_map  bitmap of which key parts of 'key' are present
  @param find_flag    search mode (HA_READ_KEY_EXACT, HA_READ_AFTER_KEY, ...)

  NOTE: unlike index_read_map(), this variant does not require a prior
  index_init() call (no inited==INDEX assertion here).

  @return 0 on success, or a MyISAM error code from mi_rkey()
*/
int ha_myisam::index_read_idx_map(uchar *buf, uint index, const uchar *key,
                                  key_part_map keypart_map,
                                  enum ha_rkey_function find_flag)
{
  MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
  ha_statistic_increment(&SSV::ha_read_key_count);
  int error=mi_rkey(file, buf, index, key, keypart_map, find_flag);
  table->status=error ? STATUS_NOT_FOUND: 0;
  MYSQL_INDEX_READ_ROW_DONE(error);
  return error;
}
|
|
|
|
|
2007-08-13 15:11:25 +02:00
|
|
|
/*
  Read the LAST row that matches a key prefix on the active index.

  @param buf          OUT: buffer that receives the found record
  @param key          packed key value (prefix) to search for
  @param keypart_map  bitmap of which key parts of 'key' are present

  Implemented as an mi_rkey() call with the fixed HA_READ_PREFIX_LAST
  search mode. Requires INDEX scan mode (index_init() was called).

  @return 0 on success, or a MyISAM error code from mi_rkey()
*/
int ha_myisam::index_read_last_map(uchar *buf, const uchar *key,
                                   key_part_map keypart_map)
{
  MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
  DBUG_ENTER("ha_myisam::index_read_last");
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_key_count);
  int error=mi_rkey(file, buf, active_index, key, keypart_map,
                    HA_READ_PREFIX_LAST);
  table->status=error ? STATUS_NOT_FOUND: 0;
  MYSQL_INDEX_READ_ROW_DONE(error);
  DBUG_RETURN(error);
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/*
  Fetch the next row in index order from the currently active index.

  @param buf  OUT: buffer that receives the record

  Requires INDEX scan mode. table->status is set to STATUS_NOT_FOUND on
  failure so upper layers can detect end-of-index.

  @return 0 on success, or a MyISAM error code from mi_rnext()
*/
int ha_myisam::index_next(uchar *buf)
{
  MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
  DBUG_ASSERT(inited == INDEX);
  ha_statistic_increment(&SSV::ha_read_next_count);
  int res= mi_rnext(file, buf, active_index);
  table->status= res ? STATUS_NOT_FOUND : 0;
  MYSQL_INDEX_READ_ROW_DONE(res);
  return res;
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/*
  Fetch the previous row in index order from the currently active index.

  @param buf  OUT: buffer that receives the record

  Requires INDEX scan mode. table->status is set to STATUS_NOT_FOUND on
  failure so upper layers can detect start-of-index.

  @return 0 on success, or a MyISAM error code from mi_rprev()
*/
int ha_myisam::index_prev(uchar *buf)
{
  MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
  DBUG_ASSERT(inited == INDEX);
  ha_statistic_increment(&SSV::ha_read_prev_count);
  int res= mi_rprev(file, buf, active_index);
  table->status= res ? STATUS_NOT_FOUND : 0;
  MYSQL_INDEX_READ_ROW_DONE(res);
  return res;
}
|
2000-08-17 17:30:36 +02:00
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/*
  Position on and read the first row of the currently active index.

  @param buf  OUT: buffer that receives the record

  Requires INDEX scan mode. table->status is set to STATUS_NOT_FOUND on
  failure (e.g. empty index).

  @return 0 on success, or a MyISAM error code from mi_rfirst()
*/
int ha_myisam::index_first(uchar *buf)
{
  MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
  DBUG_ASSERT(inited == INDEX);
  ha_statistic_increment(&SSV::ha_read_first_count);
  int res= mi_rfirst(file, buf, active_index);
  table->status= res ? STATUS_NOT_FOUND : 0;
  MYSQL_INDEX_READ_ROW_DONE(res);
  return res;
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/*
  Position on and read the last row of the currently active index.

  @param buf  OUT: buffer that receives the record

  Requires INDEX scan mode. table->status is set to STATUS_NOT_FOUND on
  failure (e.g. empty index).

  @return 0 on success, or a MyISAM error code from mi_rlast()
*/
int ha_myisam::index_last(uchar *buf)
{
  MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
  DBUG_ASSERT(inited == INDEX);
  ha_statistic_increment(&SSV::ha_read_last_count);
  int res= mi_rlast(file, buf, active_index);
  table->status= res ? STATUS_NOT_FOUND : 0;
  MYSQL_INDEX_READ_ROW_DONE(res);
  return res;
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/*
  Fetch the next row that has the same key value as the last key read on
  the active index.

  @param buf     OUT: buffer that receives the record
  @param key     unused: MyISAM remembers the last key internally
  @param length  unused: see above

  Rows flagged HA_ERR_RECORD_DELETED are skipped by retrying; the loop
  ends on success, end-of-matching-rows, or any other error.

  @return 0 on success, or a MyISAM error code from mi_rnext_same()
*/
int ha_myisam::index_next_same(uchar *buf,
                               const uchar *key __attribute__((unused)),
                               uint length __attribute__((unused)))
{
  int error;
  DBUG_ASSERT(inited==INDEX);
  MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str);
  ha_statistic_increment(&SSV::ha_read_next_count);
  /* Skip over records that were concurrently marked as deleted */
  do
  {
    error= mi_rnext_same(file,buf);
  } while (error == HA_ERR_RECORD_DELETED);
  table->status=error ? STATUS_NOT_FOUND: 0;
  MYSQL_INDEX_READ_ROW_DONE(error);
  return error;
}
|
|
|
|
|
|
|
|
|
|
|
|
int ha_myisam::rnd_init(bool scan)
|
|
|
|
{
|
|
|
|
if (scan)
|
|
|
|
return mi_scan_init(file);
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automatically converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparison with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly).
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
return mi_reset(file); // Free buffers
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly as this conflict was often hidden by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/**
  Read the next row of a full table scan.

  @param buf  Record buffer the fetched row is copied into.

  @return 0 on success, non-zero MyISAM error code otherwise.
          table->status is set to STATUS_NOT_FOUND on failure.
*/
int ha_myisam::rnd_next(uchar *buf)
{
  int rc;
  MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str,
                       TRUE);
  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
  rc= mi_scan(file, buf);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  MYSQL_READ_ROW_DONE(rc);
  return rc;
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/**
  Re-read a row at a previously saved position.
  Thin wrapper that simply delegates to rnd_pos().

  @param buf  Record buffer the row is read into.
  @param pos  Position reference previously stored by position().

  @return result of rnd_pos() (0 on success).
*/
int ha_myisam::restart_rnd_next(uchar *buf, uchar *pos)
{
  return rnd_pos(buf, pos);
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
/**
  Read a specific row identified by a position reference.

  @param buf  Record buffer the fetched row is copied into.
  @param pos  Position reference (as stored by position()), decoded
              with my_get_ptr() using ref_length.

  @return 0 on success, non-zero MyISAM error code otherwise.
          table->status is set to STATUS_NOT_FOUND on failure.
*/
int ha_myisam::rnd_pos(uchar *buf, uchar *pos)
{
  int rc;
  MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str,
                       FALSE);
  ha_statistic_increment(&SSV::ha_read_rnd_count);
  rc= mi_rrnd(file, buf, my_get_ptr(pos, ref_length));
  table->status= rc ? STATUS_NOT_FOUND : 0;
  MYSQL_READ_ROW_DONE(rc);
  return rc;
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
void ha_myisam::position(const uchar *record)
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2006-12-14 23:51:37 +01:00
|
|
|
my_off_t row_position= mi_position(file);
|
|
|
|
my_store_ptr(ref, ref_length, row_position);
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
2006-08-10 16:55:20 +02:00
|
|
|
int ha_myisam::info(uint flag)
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2006-12-14 23:51:37 +01:00
|
|
|
MI_ISAMINFO misam_info;
|
2001-06-01 03:27:59 +02:00
|
|
|
char name_buff[FN_REFLEN];
|
|
|
|
|
2006-12-14 23:51:37 +01:00
|
|
|
(void) mi_status(file,&misam_info,flag);
|
2000-07-31 21:29:14 +02:00
|
|
|
if (flag & HA_STATUS_VARIABLE)
|
|
|
|
{
|
2007-01-27 02:46:45 +01:00
|
|
|
stats.records= misam_info.records;
|
|
|
|
stats.deleted= misam_info.deleted;
|
|
|
|
stats.data_file_length= misam_info.data_file_length;
|
|
|
|
stats.index_file_length= misam_info.index_file_length;
|
|
|
|
stats.delete_length= misam_info.delete_length;
|
2009-05-13 12:11:24 +02:00
|
|
|
stats.check_time= (ulong) misam_info.check_time;
|
2007-01-27 02:46:45 +01:00
|
|
|
stats.mean_rec_length= misam_info.mean_reclength;
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
if (flag & HA_STATUS_CONST)
|
|
|
|
{
|
2005-01-06 12:00:13 +01:00
|
|
|
TABLE_SHARE *share= table->s;
|
2007-01-27 02:46:45 +01:00
|
|
|
stats.max_data_file_length= misam_info.max_data_file_length;
|
|
|
|
stats.max_index_file_length= misam_info.max_index_file_length;
|
2009-05-13 12:11:24 +02:00
|
|
|
stats.create_time= (ulong) misam_info.create_time;
|
2006-12-14 23:51:37 +01:00
|
|
|
ref_length= misam_info.reflength;
|
|
|
|
share->db_options_in_use= misam_info.options;
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the momement
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it neads in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to loose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparision with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly).
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
stats.block_size= myisam_block_size; /* record block size */
|
2005-11-23 21:45:02 +01:00
|
|
|
|
|
|
|
/* Update share */
|
|
|
|
if (share->tmp_table == NO_TMP_TABLE)
|
2010-02-03 14:43:03 +01:00
|
|
|
mysql_mutex_lock(&share->LOCK_ha_data);
|
2005-01-06 12:00:13 +01:00
|
|
|
share->keys_in_use.set_prefix(share->keys);
|
2006-12-14 23:51:37 +01:00
|
|
|
share->keys_in_use.intersect_extended(misam_info.key_map);
|
2005-01-06 12:00:13 +01:00
|
|
|
share->keys_for_keyread.intersect(share->keys_in_use);
|
2006-12-14 23:51:37 +01:00
|
|
|
share->db_record_offset= misam_info.record_offset;
|
2005-01-06 12:00:13 +01:00
|
|
|
if (share->key_parts)
|
2000-07-31 21:29:14 +02:00
|
|
|
memcpy((char*) table->key_info[0].rec_per_key,
|
2006-12-14 23:51:37 +01:00
|
|
|
(char*) misam_info.rec_per_key,
|
2009-05-27 12:34:21 +02:00
|
|
|
sizeof(table->key_info[0].rec_per_key[0])*share->key_parts);
|
2005-11-23 21:45:02 +01:00
|
|
|
if (share->tmp_table == NO_TMP_TABLE)
|
2010-02-03 14:43:03 +01:00
|
|
|
mysql_mutex_unlock(&share->LOCK_ha_data);
|
2005-11-23 21:45:02 +01:00
|
|
|
|
2001-06-01 03:27:59 +02:00
|
|
|
/*
|
|
|
|
Set data_file_name and index_file_name to point at the symlink value
|
2001-06-05 02:38:10 +02:00
|
|
|
if table is symlinked (Ie; Real name is not same as generated name)
|
2001-06-01 03:27:59 +02:00
|
|
|
*/
|
2005-11-23 21:45:02 +01:00
|
|
|
data_file_name= index_file_name= 0;
|
2006-12-28 06:42:04 +01:00
|
|
|
fn_format(name_buff, file->filename, "", MI_NAME_DEXT,
|
|
|
|
MY_APPEND_EXT | MY_UNPACK_FILENAME);
|
2006-12-14 23:51:37 +01:00
|
|
|
if (strcmp(name_buff, misam_info.data_file_name))
|
2007-01-27 02:46:45 +01:00
|
|
|
data_file_name=misam_info.data_file_name;
|
2006-12-28 06:42:04 +01:00
|
|
|
fn_format(name_buff, file->filename, "", MI_NAME_IEXT,
|
|
|
|
MY_APPEND_EXT | MY_UNPACK_FILENAME);
|
2006-12-14 23:51:37 +01:00
|
|
|
if (strcmp(name_buff, misam_info.index_file_name))
|
2007-01-27 02:46:45 +01:00
|
|
|
index_file_name=misam_info.index_file_name;
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
if (flag & HA_STATUS_ERRKEY)
|
|
|
|
{
|
2006-12-14 23:51:37 +01:00
|
|
|
errkey = misam_info.errkey;
|
2007-01-27 02:46:45 +01:00
|
|
|
my_store_ptr(dup_ref, ref_length, misam_info.dupp_key_pos);
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
if (flag & HA_STATUS_TIME)
|
2009-05-13 12:11:24 +02:00
|
|
|
stats.update_time = (ulong) misam_info.update_time;
|
2000-07-31 21:29:14 +02:00
|
|
|
if (flag & HA_STATUS_AUTO)
|
2007-01-27 02:46:45 +01:00
|
|
|
stats.auto_increment_value= misam_info.auto_increment;
|
2006-08-10 16:55:20 +02:00
|
|
|
|
|
|
|
return 0;
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int ha_myisam::extra(enum ha_extra_function operation)
|
|
|
|
{
|
2002-07-23 17:31:22 +02:00
|
|
|
if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_KEYREAD)
|
|
|
|
return 0;
|
2008-11-20 16:16:20 +01:00
|
|
|
if (operation == HA_EXTRA_MMAP && !opt_myisam_use_mmap)
|
|
|
|
return 0;
|
2002-07-23 17:31:22 +02:00
|
|
|
return mi_extra(file, operation, 0);
|
|
|
|
}
|
|
|
|
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For non-DBUG binaries, the dbug_tmp_use_all_columns() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away by the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the momement
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it neads in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to loose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparision with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly).
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
/*
  Statement-end cleanup hook, invoked via ha_reset() after every DML
  statement (per the handler-interface contract described in this file's
  changelog); delegates all engine-level cleanup to MyISAM.
*/
int ha_myisam::reset(void)
{
  return mi_reset(file);
}
|
2002-07-23 17:31:22 +02:00
|
|
|
|
2002-12-07 22:40:20 +01:00
|
|
|
/* To be used with WRITE_CACHE and EXTRA_CACHE */
|
2002-07-23 17:31:22 +02:00
|
|
|
|
|
|
|
int ha_myisam::extra_opt(enum ha_extra_function operation, ulong cache_size)
|
|
|
|
{
|
2002-12-12 20:01:32 +01:00
|
|
|
if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_WRITE_CACHE)
|
2000-07-31 21:29:14 +02:00
|
|
|
return 0;
|
2002-07-23 17:31:22 +02:00
|
|
|
return mi_extra(file, operation, (void*) &cache_size);
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
int ha_myisam::delete_all_rows()
|
|
|
|
{
|
|
|
|
return mi_delete_all_rows(file);
|
|
|
|
}
|
|
|
|
|
Bug#49938: Failing assertion: inode or deadlock in fsp/fsp0fsp.c
Bug#54678: InnoDB, TRUNCATE, ALTER, I_S SELECT, crash or deadlock
- Incompatible change: truncate no longer resorts to a row by
row delete if the storage engine does not support the truncate
method. Consequently, the count of affected rows does not, in
any case, reflect the actual number of rows.
- Incompatible change: it is no longer possible to truncate a
table that participates as a parent in a foreign key constraint,
unless it is a self-referencing constraint (both parent and child
are in the same table). To work around this incompatible change
and still be able to truncate such tables, disable foreign checks
with SET foreign_key_checks=0 before truncate. Alternatively, if
foreign key checks are necessary, please use a DELETE statement
without a WHERE condition.
Problem description:
The problem was that for storage engines that do not support
truncate table via an external drop and recreate, such as InnoDB
which implements truncate via an internal drop and recreate, the
delete_all_rows method could be invoked with a shared metadata
lock, causing problems if the engine needed exclusive access
to some internal metadata. This problem originated with the
fact that there is no truncate-specific handler method, which
ended up leading to an abuse of the delete_all_rows method that
is primarily used for delete operations without a condition.
Solution:
The solution is to introduce a truncate handler method that is
invoked when the engine does not support truncation via a table
drop and recreate. This method is invoked under an exclusive
metadata lock, so that there is only a single instance of the
table when the method is invoked.
Also, the method is not invoked and an error is thrown if
the table is a parent in a non-self-referencing foreign key
relationship. This was necessary to avoid inconsistency as
some integrity checks are bypassed. This is in line with the
fact that truncate is primarily a DDL operation that was
designed to quickly remove all data from a table.
2010-10-06 16:34:28 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
Intended to support partitioning.
|
|
|
|
Allows a particular partition to be truncated.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int ha_myisam::truncate()
|
|
|
|
{
|
|
|
|
int error= delete_all_rows();
|
|
|
|
return error ? error : reset_auto_increment(0);
|
|
|
|
}
|
|
|
|
|
2009-07-08 14:11:34 +02:00
|
|
|
/**
  Set the table's auto-increment counter to a given value.

  Writes directly into the shared MyISAM state; presumably the caller
  holds the required lock on the table (TODO confirm against callers).

  @param value  New auto-increment starting value.
  @return Always 0.
*/
int ha_myisam::reset_auto_increment(ulonglong value)
{
  MYISAM_SHARE *share= file->s;
  share->state.auto_increment= value;
  return 0;
}
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
int ha_myisam::delete_table(const char *name)
|
|
|
|
{
|
|
|
|
return mi_delete_table(name);
|
|
|
|
}
|
|
|
|
|
2004-10-06 00:24:21 +02:00
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/**
  Acquire or release the MyISAM table lock on behalf of a statement.

  @param thd        Thread asking for the lock.
  @param lock_type  F_RDLCK/F_WRLCK/F_UNLCK as passed by the server.

  @return 0 on success, a MyISAM error code otherwise.
*/
int ha_myisam::external_lock(THD *thd, int lock_type)
{
  /* Record which thread currently uses this handler instance. */
  file->in_use.data= thd;

  /*
    Temporary tables are private to one connection, so real locking is
    unnecessary: any request other than an unlock is downgraded to
    F_EXTRA_LCK.
  */
  int effective_lock= lock_type;
  if (table->s->tmp_table)
    effective_lock= (lock_type == F_UNLCK) ? F_UNLCK : F_EXTRA_LCK;

  return mi_lock_database(file, effective_lock);
}
|
2000-07-31 21:29:14 +02:00
|
|
|
|
|
|
|
/**
  Register this handler's THR_LOCK_DATA in the server's lock array.

  If the statement supplies a concrete lock type (not TL_IGNORE) and the
  handler's slot is still unlocked, the requested type is recorded first.

  @param thd        Requesting thread (unused here).
  @param to         Next free slot in the server's lock-data array.
  @param lock_type  Lock type requested for this table.

  @return Pointer one past the slot that was filled in.
*/
THR_LOCK_DATA **ha_myisam::store_lock(THD *thd,
                                      THR_LOCK_DATA **to,
                                      enum thr_lock_type lock_type)
{
  bool should_record= (lock_type != TL_IGNORE &&
                       file->lock.type == TL_UNLOCK);
  if (should_record)
    file->lock.type= lock_type;
  *to= &file->lock;
  to++;
  return to;
}
|
|
|
|
|
|
|
|
void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
|
|
|
|
{
|
2003-08-25 16:19:44 +02:00
|
|
|
ha_myisam::info(HA_STATUS_AUTO | HA_STATUS_CONST);
|
2000-07-31 21:29:14 +02:00
|
|
|
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
|
|
|
|
{
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the momement
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it neads in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplices some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to loose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparision with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this throughly).
Lars has promosed to do this.
2006-06-04 17:52:22 +02:00
|
|
|
create_info->auto_increment_value= stats.auto_increment_value;
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
2001-06-01 03:27:59 +02:00
|
|
|
create_info->data_file_name=data_file_name;
|
|
|
|
create_info->index_file_name=index_file_name;
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-11-07 02:54:00 +01:00
|
|
|
int ha_myisam::create(const char *name, register TABLE *table_arg,
|
2006-12-14 23:51:37 +01:00
|
|
|
HA_CREATE_INFO *ha_create_info)
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
|
|
|
int error;
|
2007-01-31 15:57:54 +01:00
|
|
|
uint create_flags= 0, records, i;
|
2000-07-31 21:29:14 +02:00
|
|
|
char buff[FN_REFLEN];
|
|
|
|
MI_KEYDEF *keydef;
|
2007-01-31 13:15:20 +01:00
|
|
|
MI_COLUMNDEF *recinfo;
|
2000-07-31 21:29:14 +02:00
|
|
|
MI_CREATE_INFO create_info;
|
2005-11-23 21:45:02 +01:00
|
|
|
TABLE_SHARE *share= table_arg->s;
|
2005-01-06 12:00:13 +01:00
|
|
|
uint options= share->db_options_in_use;
|
2000-07-31 21:29:14 +02:00
|
|
|
DBUG_ENTER("ha_myisam::create");
|
2007-01-31 15:57:54 +01:00
|
|
|
for (i= 0; i < share->keys; i++)
|
2003-03-07 11:36:52 +01:00
|
|
|
{
|
2007-01-31 15:57:54 +01:00
|
|
|
if (table_arg->key_info[i].flags & HA_USES_PARSER)
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2007-01-31 15:57:54 +01:00
|
|
|
create_flags|= HA_CREATE_RELIES_ON_SQL_LAYER;
|
2000-07-31 21:29:14 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2007-01-31 13:15:20 +01:00
|
|
|
if ((error= table2myisam(table_arg, &keydef, &recinfo, &records)))
|
|
|
|
DBUG_RETURN(error); /* purecov: inspected */
|
|
|
|
bzero((char*) &create_info, sizeof(create_info));
|
2005-01-06 12:00:13 +01:00
|
|
|
create_info.max_rows= share->max_rows;
|
|
|
|
create_info.reloc_rows= share->min_rows;
|
2007-01-31 14:09:58 +01:00
|
|
|
create_info.with_auto_increment= share->next_number_key_offset == 0;
|
2007-01-27 02:46:45 +01:00
|
|
|
create_info.auto_increment= (ha_create_info->auto_increment_value ?
|
|
|
|
ha_create_info->auto_increment_value -1 :
|
2007-01-31 13:15:20 +01:00
|
|
|
(ulonglong) 0);
|
2005-01-06 12:00:13 +01:00
|
|
|
create_info.data_file_length= ((ulonglong) share->max_rows *
|
2007-01-31 14:09:58 +01:00
|
|
|
share->avg_row_length);
|
2007-02-05 12:31:20 +01:00
|
|
|
create_info.data_file_name= ha_create_info->data_file_name;
|
2006-12-14 23:51:37 +01:00
|
|
|
create_info.index_file_name= ha_create_info->index_file_name;
|
2008-01-24 18:56:42 +01:00
|
|
|
create_info.language= share->table_charset->number;
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2006-12-14 23:51:37 +01:00
|
|
|
if (ha_create_info->options & HA_LEX_CREATE_TMP_TABLE)
|
2005-03-03 19:51:29 +01:00
|
|
|
create_flags|= HA_CREATE_TMP_TABLE;
|
2007-07-11 09:49:54 +02:00
|
|
|
if (ha_create_info->options & HA_CREATE_KEEP_FILES)
|
|
|
|
create_flags|= HA_CREATE_KEEP_FILES;
|
2005-03-03 19:51:29 +01:00
|
|
|
if (options & HA_OPTION_PACK_RECORD)
|
|
|
|
create_flags|= HA_PACK_RECORD;
|
|
|
|
if (options & HA_OPTION_CHECKSUM)
|
|
|
|
create_flags|= HA_CREATE_CHECKSUM;
|
|
|
|
if (options & HA_OPTION_DELAY_KEY_WRITE)
|
|
|
|
create_flags|= HA_CREATE_DELAY_KEY_WRITE;
|
|
|
|
|
2004-03-10 12:46:11 +01:00
|
|
|
/* TODO: Check that the following fn_format is really needed */
|
2007-01-31 13:15:20 +01:00
|
|
|
error= mi_create(fn_format(buff, name, "", "",
|
2007-01-31 15:57:54 +01:00
|
|
|
MY_UNPACK_FILENAME|MY_APPEND_EXT),
|
2007-01-31 14:09:58 +01:00
|
|
|
share->keys, keydef,
|
2007-01-31 13:15:20 +01:00
|
|
|
records, recinfo,
|
|
|
|
0, (MI_UNIQUEDEF*) 0,
|
|
|
|
&create_info, create_flags);
|
Bug#34043: Server loops excessively in _checkchunk() when safemalloc is enabled
Essentially, the problem is that safemalloc is excruciatingly
slow as it checks all allocated blocks for overrun at each
memory management primitive, yielding a almost exponential
slowdown for the memory management functions (malloc, realloc,
free). The overrun check basically consists of verifying some
bytes of a block for certain magic keys, which catches some
simple forms of overrun. Another minor problem is violation
of aliasing rules and that its own internal list of blocks
is prone to corruption.
Another issue with safemalloc is rather the maintenance cost
as the tool has a significant impact on the server code.
Given the magnitude of memory debuggers available nowadays,
especially those that are provided with the platform malloc
implementation, maintenance of a in-house and largely obsolete
memory debugger becomes a burden that is not worth the effort
due to its slowness and lack of support for detecting more
common forms of heap corruption.
Since there are third-party tools that can provide the same
functionality at a lower or comparable performance cost, the
solution is to simply remove safemalloc. Third-party tools
can provide the same functionality at a lower or comparable
performance cost.
The removal of safemalloc also allows a simplification of the
malloc wrappers, removing quite a bit of kludge: redefinition
of my_malloc, my_free and the removal of the unused second
argument of my_free. Since free() always check whether the
supplied pointer is null, redudant checks are also removed.
Also, this patch adds unit testing for my_malloc and moves
my_realloc implementation into the same file as the other
memory allocation primitives.
2010-07-08 23:20:08 +02:00
|
|
|
my_free(recinfo);
|
2000-07-31 21:29:14 +02:00
|
|
|
DBUG_RETURN(error);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int ha_myisam::rename_table(const char * from, const char * to)
|
|
|
|
{
|
|
|
|
return mi_rename(from,to);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-06-02 22:21:32 +02:00
|
|
|
/**
  Compute the next auto-increment value to hand out.

  Two cases:
  - The auto-increment column is the first part of its key: the counter
    cached in the MyISAM state is authoritative, and since MyISAM only
    has table-level locks the whole remaining range can be reserved.
  - The column is a later key part (multi-part auto-increment): look up
    the largest existing value for the current key prefix and return
    that value plus one, reserving a single value.

  @param offset              Auto-increment offset (unused here).
  @param increment           Auto-increment step (unused here).
  @param nb_desired_values   Values the caller would like (unused here).
  @param[out] first_value        First value to use.
  @param[out] nb_reserved_values How many consecutive values are reserved.
*/
void ha_myisam::get_auto_increment(ulonglong offset, ulonglong increment,
                                   ulonglong nb_desired_values,
                                   ulonglong *first_value,
                                   ulonglong *nb_reserved_values)
{
  ulonglong next_value;
  int search_error;
  uchar last_key[MI_MAX_KEY_LENGTH];

  if (!table->s->next_number_key_offset)
  {
    /* Auto-increment at key start: cached counter is the answer. */
    ha_myisam::info(HA_STATUS_AUTO);
    *first_value= stats.auto_increment_value;
    /* MyISAM has only table-level lock, so reserves to +inf */
    *nb_reserved_values= ULONGLONG_MAX;
    return;
  }

  /* it's safe to call the following if bulk_insert isn't on */
  mi_flush_bulk_insert(file, table->s->next_number_index);

  (void) extra(HA_EXTRA_KEYREAD);

  /* Build the key prefix preceding the auto-increment part. */
  key_copy(last_key, table->record[0],
           table->key_info + table->s->next_number_index,
           table->s->next_number_key_offset);

  /* Find the last (largest) entry for that prefix. */
  search_error= mi_rkey(file, table->record[1],
                        (int) table->s->next_number_index,
                        last_key,
                        make_prev_keypart_map(table->s->next_number_keypart),
                        HA_READ_PREFIX_LAST);
  if (search_error)
    next_value= 1;
  else
  {
    /* The fetched row is in record[1]; read the column from there. */
    next_value= ((ulonglong) table->next_number_field->
                 val_int_offset(table->s->rec_buff_length) + 1);
  }

  extra(HA_EXTRA_NO_KEYREAD);
  *first_value= next_value;
  /*
    MySQL needs to call us for next row: assume we are inserting ("a",null)
    here, we return 3, and next this statement will want to insert ("b",null):
    there is no reason why ("b",3+1) would be the good row to insert: maybe it
    already exists, maybe 3+1 is too large...
  */
  *nb_reserved_values= 1;
}
|
|
|
|
|
|
|
|
|
2003-03-18 02:16:12 +01:00
|
|
|
/*
|
|
|
|
Find out how many rows there are in the given range
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
records_in_range()
|
|
|
|
inx Index to use
|
2004-05-16 13:48:32 +02:00
|
|
|
min_key Start of range. Null pointer if from first key
|
|
|
|
max_key End of range. Null pointer if to last key
|
2003-03-18 02:16:12 +01:00
|
|
|
|
|
|
|
NOTES
|
2004-05-16 13:48:32 +02:00
|
|
|
min_key.flag can have one of the following values:
|
2003-03-18 02:16:12 +01:00
|
|
|
HA_READ_KEY_EXACT Include the key in the range
|
|
|
|
HA_READ_AFTER_KEY Don't include key in range
|
|
|
|
|
2004-05-16 13:48:32 +02:00
|
|
|
max_key.flag can have one of the following values:
|
2003-03-18 02:16:12 +01:00
|
|
|
HA_READ_BEFORE_KEY Don't include key in range
|
|
|
|
HA_READ_AFTER_KEY Include all 'end_key' values in the range
|
|
|
|
|
|
|
|
RETURN
|
|
|
|
HA_POS_ERROR Something is wrong with the index tree.
|
|
|
|
0 There are no matching keys in the given range
|
|
|
|
number > 0 There are approximately 'number' matching rows in
|
|
|
|
the range.
|
|
|
|
*/
|
|
|
|
|
2004-05-16 13:48:32 +02:00
|
|
|
ha_rows ha_myisam::records_in_range(uint inx, key_range *min_key,
                                    key_range *max_key)
{
  /*
    Estimate the number of rows between min_key and max_key by
    delegating to the MyISAM engine's range-estimation routine.
  */
  ha_rows estimate= (ha_rows) mi_records_in_range(file, (int) inx,
                                                  min_key, max_key);
  return estimate;
}
|
|
|
|
|
2003-03-18 02:16:12 +01:00
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
int ha_myisam::ft_read(uchar *buf)
{
  /*
    Fetch the next row of the active full-text search result set into buf.
    Returns -1 if no full-text search is in progress.
  */
  if (!ft_handler)
    return -1;

  thread_safe_increment(table->in_use->status_var.ha_read_next_count,
                        &LOCK_status); // why ?

  int result= ft_handler->please->read_next(ft_handler, (char*) buf);

  table->status= result ? STATUS_NOT_FOUND : 0;
  return result;
}
|
2003-08-05 21:14:15 +02:00
|
|
|
|
|
|
|
uint ha_myisam::checksum() const
{
  /* Expose the running table checksum kept in the shared MyISAM state. */
  return (uint) (file->state->checksum);
}
|
|
|
|
|
2005-07-22 22:43:59 +02:00
|
|
|
|
|
|
|
bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info,
|
|
|
|
uint table_changes)
|
|
|
|
{
|
|
|
|
uint options= table->s->db_options_in_use;
|
|
|
|
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the momement
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it neads in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplices some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to loose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparision with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this throughly).
Lars has promosed to do this.
2006-06-04 17:52:22 +02:00
|
|
|
if (info->auto_increment_value != stats.auto_increment_value ||
|
2005-07-22 22:43:59 +02:00
|
|
|
info->data_file_name != data_file_name ||
|
|
|
|
info->index_file_name != index_file_name ||
|
2006-05-15 18:41:04 +02:00
|
|
|
table_changes == IS_EQUAL_NO ||
|
|
|
|
table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet
|
2005-07-22 22:43:59 +02:00
|
|
|
return COMPATIBLE_DATA_NO;
|
|
|
|
|
|
|
|
if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM |
|
|
|
|
HA_OPTION_DELAY_KEY_WRITE)) !=
|
|
|
|
(info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM |
|
|
|
|
HA_OPTION_DELAY_KEY_WRITE)))
|
|
|
|
return COMPATIBLE_DATA_NO;
|
|
|
|
return COMPATIBLE_DATA_YES;
|
|
|
|
}
|
2006-04-13 22:49:29 +02:00
|
|
|
|
2006-09-30 21:49:46 +02:00
|
|
|
extern int mi_panic(enum ha_panic_function flag);

/**
  Handlerton "panic" hook for MyISAM.

  @param hton  the MyISAM handlerton (unused; mi_panic() acts globally)
  @param flag  requested panic action

  @return result of mi_panic()
*/
int myisam_panic(handlerton *hton, ha_panic_function flag)
{
  return mi_panic(flag);
}
|
2006-05-28 14:51:01 +02:00
|
|
|
|
2006-09-15 19:28:00 +02:00
|
|
|
/**
  Plugin initialization function for the MyISAM storage engine.

  Applies startup options to MyISAM's global variables and fills in the
  handlerton supplied by the plugin framework.

  @param p  the handlerton to initialize, passed as void*

  @return 0 (this function cannot fail)
*/
static int myisam_init(void *p)
{
  handlerton *myisam_hton;

#ifdef HAVE_PSI_INTERFACE
  /* Register performance-schema instrumentation keys when compiled in. */
  init_myisam_psi_keys();
#endif

  /* Set global variables based on startup options */
  if (myisam_recover_options)
    ha_open_options|=HA_OPEN_ABORT_IF_CRASHED;
  else
    myisam_recover_options= HA_RECOVER_OFF;

  /* Block size is forced to 2^my_bit_log2(opt_myisam_block_size). */
  myisam_block_size=(uint) 1 << my_bit_log2(opt_myisam_block_size);

  /* Fill in the handlerton slots the server uses to drive this engine. */
  myisam_hton= (handlerton *)p;
  myisam_hton->state= SHOW_OPTION_YES;
  myisam_hton->db_type= DB_TYPE_MYISAM;
  myisam_hton->create= myisam_create_handler;
  myisam_hton->panic= myisam_panic;
  myisam_hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES;
  return 0;
}
|
|
|
|
|
2009-12-22 10:35:56 +01:00
|
|
|
/* System variables exported by the MyISAM plugin; the list is 0-terminated. */
static struct st_mysql_sys_var* myisam_sysvars[]= {
  MYSQL_SYSVAR(block_size),
  MYSQL_SYSVAR(data_pointer_size),
  MYSQL_SYSVAR(max_sort_file_size),
  MYSQL_SYSVAR(recover_options),
  MYSQL_SYSVAR(repair_threads),
  MYSQL_SYSVAR(sort_buffer_size),
  MYSQL_SYSVAR(use_mmap),
  MYSQL_SYSVAR(mmap_size),
  MYSQL_SYSVAR(stats_method),
  0
};
|
|
|
|
|
2006-05-28 14:51:01 +02:00
|
|
|
struct st_mysql_storage_engine myisam_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };

/*
  Plugin descriptor registering MyISAM with the server plugin framework.
  The initializers below are positional; their order must match
  struct st_mysql_plugin.
*/
mysql_declare_plugin(myisam)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,
  &myisam_storage_engine,
  "MyISAM",
  "MySQL AB",
  "MyISAM storage engine",
  PLUGIN_LICENSE_GPL,
  myisam_init, /* Plugin Init */
  NULL, /* Plugin Deinit */
  0x0100, /* 1.0 */
  NULL, /* status variables */
  myisam_sysvars, /* system variables */
  NULL,
  0,
}
mysql_declare_plugin_end;
|
2006-05-28 14:51:01 +02:00
|
|
|
|
2007-07-16 21:31:36 +02:00
|
|
|
|
2007-07-12 13:29:51 +02:00
|
|
|
#ifdef HAVE_QUERY_CACHE
|
|
|
|
/**
|
|
|
|
@brief Register a named table with a call back function to the query cache.
|
|
|
|
|
|
|
|
@param thd The thread handle
|
|
|
|
@param table_key A pointer to the table name in the table cache
|
|
|
|
@param key_length The length of the table name
|
|
|
|
@param[out] engine_callback The pointer to the storage engine call back
|
|
|
|
function, currently 0
|
|
|
|
@param[out] engine_data Engine data will be set to 0.
|
|
|
|
|
|
|
|
@note Despite the name of this function, it is used to check each statement
|
|
|
|
before it is cached and not to register a table or callback function.
|
|
|
|
|
|
|
|
@see handler::register_query_cache_table
|
|
|
|
|
|
|
|
@return Whether the statement may be cached. The engine_data and
engine_callback output arguments are always set to 0.

@retval TRUE The statement may be cached
@retval FALSE The statement must not be cached
|
|
|
|
*/
|
|
|
|
|
|
|
|
my_bool ha_myisam::register_query_cache_table(THD *thd, char *table_name,
                                              uint table_name_len,
                                              qc_engine_callback
                                              *engine_callback,
                                              ulonglong *engine_data)
{
  DBUG_ENTER("ha_myisam::register_query_cache_table");
  /*
    NOTE(review): thd, table_name and table_name_len are unused here;
    cacheability is decided purely from the MyISAM share state below.
  */
  /*
    No call back function is needed to determine if a cached statement
    is valid or not.
  */
  *engine_callback= 0;

  /*
    No engine data is needed.
  */
  *engine_data= 0;

  if (file->s->concurrent_insert)
  {
    /*
      If a concurrent INSERT has happened just before the currently
      processed SELECT statement, the total size of the table is
      unknown.

      To determine if the table size is known, the current thread's snap
      shot of the table size with the actual table size are compared.

      If the table size is unknown the SELECT statement can't be cached.

      When concurrent inserts are disabled at table open, mi_open()
      does not assign a get_status() function. In this case the local
      ("current") status is never updated. We would wrongly think that
      we cannot cache the statement.
    */
    ulonglong actual_data_file_length;
    ulonglong current_data_file_length;

    /*
      POSIX visibility rules specify that "2. Whatever memory values a
      thread can see when it unlocks a mutex <...> can also be seen by any
      thread that later locks the same mutex". In this particular case,
      concurrent insert thread had modified the data_file_length in
      MYISAM_SHARE before it has unlocked (or even locked)
      structure_guard_mutex. So, here we're guaranteed to see at least that
      value after we've locked the same mutex. We can see a later value
      (modified by some other thread) though, but it's ok, as we only want
      to know if the variable was changed, the actual new value doesn't matter
    */
    actual_data_file_length= file->s->state.state.data_file_length;
    current_data_file_length= file->save_state.data_file_length;

    if (current_data_file_length != actual_data_file_length)
    {
      /* Don't cache current statement. */
      DBUG_RETURN(FALSE);
    }
  }

  /*
    This query execution might have started after the query cache was flushed
    by a concurrent INSERT. In this case, don't cache this statement as the
    data file length difference might not be visible yet if the tables haven't
    been unlocked by the concurrent insert thread.
  */
  if (file->state->uncacheable)
    DBUG_RETURN(FALSE);

  /* It is ok to try to cache current statement. */
  DBUG_RETURN(TRUE);
}
|
|
|
|
#endif
|