Merge paul@bk-internal.mysql.com:/home/bk/mysql-4.1
into ice.snake.net:/Volumes/ice2/MySQL/bk/mysql-4.1

paul@ice.snake.net 2004-06-23 18:11:09 -05:00
commit 3029ae651d
62 changed files with 1547 additions and 1227 deletions

View file

@ -116,6 +116,7 @@ mskold@mysql.com
msvensson@build.mysql.com
mwagner@cash.mwagner.org
mwagner@evoq.mwagner.org
mwagner@here.mwagner.org
mwagner@work.mysql.com
mydev@mysql.com
mysql@home.(none)

View file

@ -102,27 +102,13 @@ sub main
# exist in the new mysql distributions, but let's be sure..
unlink("$destdir/PUBLIC", "$destdir/README");
copy("$WD/Docs/MySQLEULA.txt", "$destdir");
# remove readline subdir and update configure accordingly
system("rm -rf $destdir/cmd-line-utils/readline");
if ($win_flag) {
chdir("$destdir") or (print "$! Unable to change directory to $desdir!\n" && exit(0));
} else {
chdir("$destdir");
unlink ("configure") or die "Can't delete $destdir/configure: $!\n";
open(CONFIGURE,"<configure.in") or die "$! Unable to open configure.in to read from!\n";
undef $/;
my $configure = <CONFIGURE>;
close(CONFIGURE);
$configure =~ s|cmd\-line\-utils/readline/Makefile dnl\n?||g;
open(CONFIGURE,">configure.in") or die "$! Unable to open configure.in to write to!\n";
print CONFIGURE $configure;
close(CONFIGURE);
`autoconf`;
if (! -f "configure") {
print "\"./configure\" was not produced, exiting!\n";
exit(0);
}
# remove readline, bdb subdirs and update 'configure'
my @extra_fat= ('bdb', 'cmd-line-utils/readline');
foreach my $fat (@extra_fat)
{
&trim_the_fat($fat);
}
# fix file copyrights
@ -154,6 +140,39 @@ sub main
exit(0);
}
####
#### This function will remove unwanted parts of a src tree for the mysqlcom
#### distributions.
####
sub trim_the_fat
{
my $the_fat= shift;
my $cwd= getcwd();
system("rm -rf $destdir/${the_fat}");
if ($win_flag)
{
chdir("$destdir") or die "Unable to change directory to $destdir!: $!\n";
}
else
{
chdir("$destdir");
unlink ("configure") or die "Can't delete $destdir/configure: $!\n";
open(CONFIGURE,"<configure.in") or die "Unable to open configure.in for read: $!\n";
undef $/;
my $configure= <CONFIGURE>;
close(CONFIGURE);
$configure=~ s|${the_fat}/Makefile dnl\n?||g;
open(CONFIGURE,">configure.in") or die "Unable to open configure.in for write: $!\n";
print CONFIGURE $configure;
close(CONFIGURE);
`autoconf`;
die "'./configure' was not produced!" unless (-f "configure");
chdir("$cwd");
}
}
####
#### mysqld and MySQL client programs have a usage printed with --help.
#### This usage includes a copyright, which needs to be modified

View file

@ -1544,6 +1544,8 @@ static void dumpTable(uint numFields, char *table)
fputs(ptr, md_result_file);
}
}
else
fputs("NULL", md_result_file);
}
}

View file

@ -224,7 +224,7 @@ Q_ENABLE_QUERY_LOG, Q_DISABLE_QUERY_LOG,
Q_ENABLE_RESULT_LOG, Q_DISABLE_RESULT_LOG,
Q_SERVER_START, Q_SERVER_STOP,Q_REQUIRE_MANAGER,
Q_WAIT_FOR_SLAVE_TO_STOP,
Q_REQUIRE_VERSION,
Q_REQUIRE_VERSION, Q_REQUIRE_OS,
Q_ENABLE_WARNINGS, Q_DISABLE_WARNINGS,
Q_ENABLE_INFO, Q_DISABLE_INFO,
Q_ENABLE_METADATA, Q_DISABLE_METADATA,
@ -298,6 +298,7 @@ const char *command_names[]=
"require_manager",
"wait_for_slave_to_stop",
"require_version",
"require_os",
"enable_warnings",
"disable_warnings",
"enable_info",
@ -853,6 +854,27 @@ int do_require_version(struct st_query* q)
return 0;
}
int do_require_os(struct st_query* q)
{
char *p=q->first_argument, *os_arg;
DBUG_ENTER("do_require_os");
if (!*p)
die("Missing version argument in require_os\n");
os_arg= p;
while (*p && !my_isspace(charset_info,*p))
p++;
*p = 0;
if (strcmp(os_arg, "unix"))
die("For now only testing of os=unix is implemented\n");
#if defined(__NETWARE__) || defined(__WIN__) || defined(__OS2__)
abort_not_supported_test();
#endif
DBUG_RETURN(0);
}
int do_source(struct st_query* q)
{
char* p=q->first_argument, *name;
@ -2715,6 +2737,7 @@ int main(int argc, char **argv)
case Q_SLEEP: do_sleep(q, 0); break;
case Q_REAL_SLEEP: do_sleep(q, 1); break;
case Q_REQUIRE_VERSION: do_require_version(q); break;
case Q_REQUIRE_OS: do_require_os(q); break;
case Q_WAIT_FOR_SLAVE_TO_STOP: do_wait_for_slave_to_stop(q); break;
case Q_REQUIRE_MANAGER: do_require_manager(q); break;
#ifndef EMBEDDED_LIBRARY

View file

@ -2776,12 +2776,14 @@ then
if test X"$have_isam" != Xno
then
sql_server_dirs="$sql_server_dirs isam merge"
AC_CONFIG_FILES(isam/Makefile merge/Makefile)
fi
if test X"$have_berkeley_db" != Xno; then
if test X"$have_berkeley_db" != Xyes; then
# we must build berkeley db from source
sql_server_dirs="$sql_server_dirs $have_berkeley_db"
AC_CONFIG_FILES(bdb/Makefile)
echo "CONFIGURING FOR BERKELEY DB"
bdb_conf_flags=
@ -3021,9 +3023,8 @@ fi
AC_SUBST(MAKE_BINARY_DISTRIBUTION_OPTIONS)
# Output results
AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile isam/Makefile dnl
AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile dnl
strings/Makefile regex/Makefile heap/Makefile dnl
bdb/Makefile dnl
myisam/Makefile myisammrg/Makefile dnl
os2/Makefile os2/include/Makefile os2/include/sys/Makefile dnl
man/Makefile BUILD/Makefile vio/Makefile dnl
@ -3031,7 +3032,7 @@ AC_CONFIG_FILES(Makefile extra/Makefile mysys/Makefile isam/Makefile dnl
libmysql/Makefile client/Makefile dnl
pstack/Makefile pstack/aout/Makefile sql/Makefile sql/share/Makefile dnl
sql-common/Makefile SSL/Makefile dnl
merge/Makefile dbug/Makefile scripts/Makefile dnl
dbug/Makefile scripts/Makefile dnl
include/Makefile sql-bench/Makefile tools/Makefile dnl
tests/Makefile Docs/Makefile support-files/Makefile dnl
support-files/MacOSX/Makefile mysql-test/Makefile dnl

View file

@ -377,7 +377,4 @@ typedef ulong ha_rows;
#define MAX_FILE_SIZE LONGLONG_MAX
#endif
/* Currently used for saying which interfaces a Storage Engine implements */
#define HA_ERR_NOT_IMPLEMENTED -1
#endif /* _my_base_h */

View file

@ -47,7 +47,7 @@ extern "C" {
#define MI_NAME_IEXT ".MYI"
#define MI_NAME_DEXT ".MYD"
/* Max extra space to use when sorting keys */
#define MI_MAX_TEMP_LENGTH 256*1024L*1024L
#define MI_MAX_TEMP_LENGTH 2*1024L*1024L*1024L
/* Possible values for myisam_block_size (must be power of 2) */
#define MI_KEY_BLOCK_LENGTH 1024 /* default key block length */

View file

@ -32,6 +32,14 @@ or there was no master log position info inside InnoDB. */
extern char trx_sys_mysql_master_log_name[];
extern ib_longlong trx_sys_mysql_master_log_pos;
/* If this MySQL server uses binary logging, after InnoDB has been inited
and if it has done a crash recovery, we store the binlog file name and position
here. If .._pos is -1, it means there was no binlog position info inside
InnoDB. */
extern char trx_sys_mysql_bin_log_name[];
extern ib_longlong trx_sys_mysql_bin_log_pos;
/* The transaction system */
extern trx_sys_t* trx_sys;

View file

@ -45,6 +45,15 @@ or there was no master log position info inside InnoDB. */
char trx_sys_mysql_master_log_name[TRX_SYS_MYSQL_LOG_NAME_LEN];
ib_longlong trx_sys_mysql_master_log_pos = -1;
/* If this MySQL server uses binary logging, after InnoDB has been inited
and if it has done a crash recovery, we store the binlog file name and position
here. If .._pos is -1, it means there was no binlog position info inside
InnoDB. */
char trx_sys_mysql_bin_log_name[TRX_SYS_MYSQL_LOG_NAME_LEN];
ib_longlong trx_sys_mysql_bin_log_pos = -1;
/********************************************************************
Determines if a page number is located inside the doublewrite buffer. */
@ -648,8 +657,8 @@ trx_sys_print_mysql_binlog_offset_from_page(
}
/*********************************************************************
Prints to stderr the MySQL binlog offset info in the trx system header if
the magic number shows it valid. */
Stores the MySQL binlog offset info in the trx system header if
the magic number shows it valid, and print the info to stderr */
void
trx_sys_print_mysql_binlog_offset(void)
@ -657,7 +666,8 @@ trx_sys_print_mysql_binlog_offset(void)
{
trx_sysf_t* sys_header;
mtr_t mtr;
ulong trx_sys_mysql_bin_log_pos_high, trx_sys_mysql_bin_log_pos_low;
mtr_start(&mtr);
sys_header = trx_sysf_get(&mtr);
@ -671,14 +681,22 @@ trx_sys_print_mysql_binlog_offset(void)
return;
}
fprintf(stderr,
"InnoDB: Last MySQL binlog file position %lu %lu, file name %s\n",
(ulong) mach_read_from_4(sys_header + TRX_SYS_MYSQL_LOG_INFO
+ TRX_SYS_MYSQL_LOG_OFFSET_HIGH),
(ulong) mach_read_from_4(sys_header + TRX_SYS_MYSQL_LOG_INFO
+ TRX_SYS_MYSQL_LOG_OFFSET_LOW),
sys_header + TRX_SYS_MYSQL_LOG_INFO + TRX_SYS_MYSQL_LOG_NAME);
trx_sys_mysql_bin_log_pos_high = mach_read_from_4(sys_header + TRX_SYS_MYSQL_LOG_INFO
+ TRX_SYS_MYSQL_LOG_OFFSET_HIGH);
trx_sys_mysql_bin_log_pos_low = mach_read_from_4(sys_header + TRX_SYS_MYSQL_LOG_INFO
+ TRX_SYS_MYSQL_LOG_OFFSET_LOW);
trx_sys_mysql_bin_log_pos = (((ib_longlong)trx_sys_mysql_bin_log_pos_high) << 32) +
(ib_longlong)trx_sys_mysql_bin_log_pos_low;
ut_memcpy(trx_sys_mysql_bin_log_name, sys_header + TRX_SYS_MYSQL_LOG_INFO +
TRX_SYS_MYSQL_LOG_NAME, TRX_SYS_MYSQL_LOG_NAME_LEN);
fprintf(stderr,
"InnoDB: Last MySQL binlog file position %lu %lu, file name %s\n",
trx_sys_mysql_bin_log_pos_high, trx_sys_mysql_bin_log_pos_low,
trx_sys_mysql_bin_log_name);
mtr_commit(&mtr);
}
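A side note on the arithmetic above: the binlog position is persisted in the trx system header as two 32-bit words (HIGH and LOW) and recombined into a 64-bit value during recovery. A minimal, self-contained C++ sketch of that round trip — the names and the sample offset are illustrative, not InnoDB's own code:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t pos = 5368709123ULL;              // example offset beyond 4 GB
  // Split into the HIGH and LOW 32-bit words, as the trx system header stores them.
  const uint32_t high = static_cast<uint32_t>(pos >> 32);
  const uint32_t low  = static_cast<uint32_t>(pos & 0xFFFFFFFFULL);
  // Recombine the same way trx_sys_print_mysql_binlog_offset() does above.
  const uint64_t recombined = (static_cast<uint64_t>(high) << 32) + low;
  assert(recombined == pos);
  return 0;
}
```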

View file

@ -38,7 +38,7 @@ my_bool myisam_concurrent_insert=1;
#else
my_bool myisam_concurrent_insert=0;
#endif
my_off_t myisam_max_extra_temp_length= MI_MAX_TEMP_LENGTH;
my_off_t myisam_max_extra_temp_length= (my_off_t)MI_MAX_TEMP_LENGTH;
my_off_t myisam_max_temp_length= MAX_FILE_SIZE;
ulong myisam_bulk_insert_tree_size=8192*1024;
ulong myisam_data_pointer_size=4;

View file

@ -79,6 +79,7 @@ SUFFIXES = .sh
-e 's!@''MYSQL_TCP_PORT''@!@MYSQL_TCP_PORT@!' \
-e 's!@''MYSQL_BASE_VERSION''@!@MYSQL_BASE_VERSION@!' \
-e 's!@''MYSQL_UNIX_ADDR''@!@MYSQL_UNIX_ADDR@!' \
-e 's!@''MYSQL_TCP_PORT''@!@MYSQL_TCP_PORT@!' \
-e 's!@''MYSQL_NO_DASH_VERSION''@!@MYSQL_NO_DASH_VERSION@!' \
-e 's!@''MYSQL_SERVER_SUFFIX''@!@MYSQL_SERVER_SUFFIX@!' \
$< > $@-t

View file

@ -0,0 +1,4 @@
-- require r/have_debug.require
disable_query_log;
select (version() like "%debug%") as debug;
enable_query_log;

View file

@ -0,0 +1,4 @@
kill -9 `cat var/run/master.pid`
# The kill may fail if process has already gone away,
# so don't use the exit code of the kill. Use 0.
exit 0

View file

@ -882,8 +882,12 @@ start_master()
if [ x$MASTER_RUNNING = x1 ] || [ x$LOCAL_MASTER = x1 ] ; then
return
fi
# Remove stale binary logs
$RM -f $MYSQL_TEST_DIR/var/log/master-bin.*
# Remove stale binary logs except for the tests which need them
if [ "$tname" != "rpl_crash_binlog_ib_1b" ] && [ "$tname" != "rpl_crash_binlog_ib_2b" ] && [ "$tname" != "rpl_crash_binlog_ib_3b" ]
then
$RM -f $MYSQL_TEST_DIR/var/log/master-bin.*
fi
# Remove old master.info and relay-log.info files
$RM -f $MYSQL_TEST_DIR/var/master-data/master.info $MYSQL_TEST_DIR/var/master-data/relay-log.info
@ -1005,8 +1009,12 @@ start_slave()
slave_sock="$SLAVE_MYSOCK"
fi
# Remove stale binary logs and old master.info files
$RM -f $MYSQL_TEST_DIR/var/log/$slave_ident-*bin.*
$RM -f $slave_datadir/master.info $slave_datadir/relay-log.info
# except for the tests which need them
if [ "$tname" != "rpl_crash_binlog_ib_1b" ] && [ "$tname" != "rpl_crash_binlog_ib_2b" ] && [ "$tname" != "rpl_crash_binlog_ib_3b" ]
then
$RM -f $MYSQL_TEST_DIR/var/log/$slave_ident-*bin.*
$RM -f $slave_datadir/master.info $slave_datadir/relay-log.info
fi
#run slave initialization shell script if one exists
if [ -f "$slave_init_script" ] ;

View file

@ -1181,3 +1181,12 @@ a
A
a
drop table t1;
set autocommit=0;
create table t1(b varchar(30)) engine=bdb;
insert into t1 values ('one');
commit;
select b FROM t1 outer_table where
exists (select 'two' from t1 where 'two' = outer_table.b);
b
drop table t1;
set autocommit=1;

View file

@ -7,8 +7,8 @@ INSERT INTO t1 VALUES('MySQL has now support', 'for full-text search'),
('Full-text search in MySQL', 'implements vector space model');
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 1 a 1 a A NULL NULL NULL YES FULLTEXT
t1 1 a 2 b A NULL NULL NULL YES FULLTEXT
t1 1 a 1 a NULL NULL NULL NULL YES FULLTEXT
t1 1 a 2 b NULL NULL NULL NULL YES FULLTEXT
select * from t1 where MATCH(a,b) AGAINST ("collections");
a b
Only MyISAM tables support collections
@ -223,7 +223,7 @@ id
show keys from t2;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t2 1 tig 1 ticket A NULL NULL NULL YES BTREE
t2 1 tix 1 inhalt A NULL NULL NULL YES FULLTEXT
t2 1 tix 1 inhalt NULL NULL NULL NULL YES FULLTEXT
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (

View file

@ -0,0 +1,2 @@
debug
1

View file

@ -141,7 +141,7 @@ INSERT INTO t1 VALUES ("1\""), ("\"2");
</mysqldump>
DROP TABLE t1;
CREATE TABLE t1 (a VARCHAR(255)) DEFAULT CHARSET koi8r;
INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5');
INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5'), (NULL);
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
@ -159,6 +159,7 @@ CREATE TABLE `t1` (
/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
LOCK TABLES `t1` WRITE;
INSERT INTO `t1` VALUES ('абцде');
INSERT INTO `t1` VALUES (NULL);
UNLOCK TABLES;
/*!40000 ALTER TABLE `t1` ENABLE KEYS */;

View file

@ -822,3 +822,28 @@ alter table t1 modify a char(10) binary;
explain select a from t1;
select a from t1;
drop table t1;
#
# Bug #4000: problem with active cursor.
#
set autocommit=0;
create table t1(b varchar(30)) engine=bdb;
insert into t1 values ('one');
commit;
select b FROM t1 outer_table where
exists (select 'two' from t1 where 'two' = outer_table.b);
drop table t1;
set autocommit=1;
#
# Bug #4089: subselect and open cursor.
#
#create table t1(a int primary key, b varchar(30)) engine=bdb;
#insert into t1 values (1,'one'), (2,'two'), (3,'three'), (4,'four');
#create table t2 like t1;
#insert into t2 (a, b)
# select a, b from t1 where (a, b) in (select a, b from t1);
#select * from t2;
#drop table t1, t2;

View file

@ -73,11 +73,12 @@ DROP TABLE t1;
#
# Bug #1994
# Bug #4261
#
CREATE TABLE t1 (a VARCHAR(255)) DEFAULT CHARSET koi8r;
INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5');
--exec $MYSQL_DUMP --skip-comments test t1
INSERT INTO t1 VALUES (_koi8r x'C1C2C3C4C5'), (NULL);
--exec $MYSQL_DUMP --skip-comments --skip-extended-insert test t1
DROP TABLE t1;
#

View file

@ -481,13 +481,13 @@ int ha_archive::update_row(const byte * old_data, byte * new_data)
{
DBUG_ENTER("ha_archive::update_row");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::delete_row(const byte * buf)
{
DBUG_ENTER("ha_archive::delete_row");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_read(byte * buf, const byte * key,
@ -496,7 +496,7 @@ int ha_archive::index_read(byte * buf, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_archive::index_read");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
@ -505,32 +505,32 @@ int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_archive::index_read_idx");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_next(byte * buf)
{
DBUG_ENTER("ha_archive::index_next");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_prev(byte * buf)
{
DBUG_ENTER("ha_archive::index_prev");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_first(byte * buf)
{
DBUG_ENTER("ha_archive::index_first");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_archive::index_last(byte * buf)
{
DBUG_ENTER("ha_archive::index_last");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@ -581,6 +581,6 @@ ha_rows ha_archive::records_in_range(int inx,
enum ha_rkey_function end_search_flag)
{
DBUG_ENTER("ha_archive::records_in_range ");
DBUG_RETURN(records); // HA_ERR_NOT_IMPLEMENTED
DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND
}
#endif /* HAVE_ARCHIVE_DB */

View file

@ -22,7 +22,7 @@
/*
Please read ha_archive.cc first. If you are looking for more general
answers on how storage engines work, look at ha_example.cc and
answers on how storage engines work, look at ha_example.cc and
ha_example.h.
*/
@ -36,7 +36,7 @@ typedef struct st_archive_share {
bool dirty; /* Flag for if a flush should occur */
} ARCHIVE_SHARE;
/*
/*
Version for file format.
1 - Initial Version
*/
@ -61,7 +61,7 @@ public:
/* The size of the offset value we will use for position() */
ref_length = sizeof(z_off_t);
}
~ha_archive()
~ha_archive()
{
}
const char *table_type() const { return "ARCHIVE"; }
@ -69,21 +69,18 @@ public:
const char **bas_ext() const;
ulong table_flags() const
{
return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_WRITE_DELAYED |
HA_NO_AUTO_INCREMENT);
return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
HA_FILE_BASED);
}
ulong index_flags(uint inx) const
ulong index_flags(uint idx, uint part) const
{
return 0;
}
/*
This is just a default, there is no real limit as far as
/*
Have to put something here, there is no real limit as far as
archive is concerned.
*/
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return 0; }
uint max_key_parts() const { return 0; }
uint max_key_length() const { return 0; }
uint max_supported_record_length() const { return UINT_MAX; }
/*
Called in test_quick_select to determine if indexes should be used.
*/

View file

@ -14,24 +14,24 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
ha_example is a stubbed storage engine. It does nothing at this point. It
will let you create/open/delete tables but that is all. You can enable it
/*
ha_example is a stubbed storage engine. It does nothing at this point. It
will let you create/open/delete tables but that is all. You can enable it
in your build by doing the following during your build process:
./configure --with-example-storage-engine
Once this is done mysql will let you create tables with:
CREATE TABLE A (...) ENGINE=EXAMPLE;
The example is setup to use table locks. It implements an example "SHARE"
that is inserted into a hash by table name. You can use this to store
that is inserted into a hash by table name. You can use this to store
information of state that any example handler object will be able to see
if it is using the same table.
Please read the object definition in ha_example.h before reading the rest
Please read the object definition in ha_example.h before reading the rest
if this file.
To get an idea of what occurs here is an example select that would do a
To get an idea of what occurs here is an example select that would do a
scan of an entire table:
ha_example::store_lock
ha_example::external_lock
@ -50,13 +50,13 @@
ha_example::rnd_next
ha_example::extra
ENUM HA_EXTRA_NO_CACHE End cacheing of records (def)
ha_example::external_lock
ha_example::external_lock
ha_example::extra
ENUM HA_EXTRA_RESET Reset database to after open
In the above example has 9 row called before rnd_next signalled that it was
at the end of its data. In the above example the table was already opened
(or you would have seen a call to ha_example::open(). Calls to
In the above example has 9 row called before rnd_next signalled that it was
at the end of its data. In the above example the table was already opened
(or you would have seen a call to ha_example::open(). Calls to
ha_example::extra() are hints as to what will be occuring to the request.
Happy coding!
@ -92,7 +92,7 @@ static byte* example_get_key(EXAMPLE_SHARE *share,uint *length,
/*
Example of simple lock controls. The "share" it creates is structure we will
pass to each example handler. Do you have to have one of these? Well, you have
pieces that are used for locking, and they are needed to function.
pieces that are used for locking, and they are needed to function.
*/
static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
{
@ -130,7 +130,7 @@ static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
&share, sizeof(*share),
&tmp_name, length+1,
NullS)))
NullS)))
{
pthread_mutex_unlock(&example_mutex);
return NULL;
@ -161,7 +161,7 @@ error:
}
/*
/*
Free lock controls. We call this whenever we close a table. If the table had
the last reference to the share then we free memory associated with it.
*/
@ -182,7 +182,7 @@ static int free_share(EXAMPLE_SHARE *share)
/*
If frm_error() is called then we will use this to to find out what file extentions
If frm_error() is called then we will use this to to find out what file extentions
exist for the storage engine. This is also used by the default rename_table and
delete_table method in handler.cc.
*/
@ -190,10 +190,10 @@ const char **ha_example::bas_ext() const
{ static const char *ext[]= { NullS }; return ext; }
/*
/*
Used for opening tables. The name will be the name of the file.
A table is opened when it needs to be opened. For instance
when a request comes in for a select on the table (tables are not
when a request comes in for a select on the table (tables are not
open and closed for each request, they are cached).
Called from handler.cc by handler::ha_open(). The server opens all tables by
@ -212,12 +212,12 @@ int ha_example::open(const char *name, int mode, uint test_if_locked)
/*
Closes a table. We call the free_share() function to free any resources
Closes a table. We call the free_share() function to free any resources
that we have allocated in the "shared" structure.
Called from sql_base.cc, sql_select.cc, and table.cc.
In sql_select.cc it is only used to close up temporary tables or during
the process where a temporary table is converted over to being a
the process where a temporary table is converted over to being a
myisam table.
For sql_base.cc look at close_data_tables().
*/
@ -230,7 +230,7 @@ int ha_example::close(void)
/*
write_row() inserts a row. No extra() hint is given currently if a bulk load
is happeneding. buf() is a byte array of data. You can use the field
is happeneding. buf() is a byte array of data. You can use the field
information to extract the data from the native byte array type.
Example of this would be:
for (Field **field=table->field ; *field ; field++)
@ -238,20 +238,20 @@ int ha_example::close(void)
...
}
See ha_tina.cc for an example of extracting all of the data as strings.
See ha_tina.cc for an example of extracting all of the data as strings.
ha_berekly.cc has an example of how to store it intact by "packing" it
for ha_berkeley's own native storage type.
See the note for update_row() on auto_increments and timestamps. This
case also applied to write_row().
Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
*/
int ha_example::write_row(byte * buf)
{
DBUG_ENTER("ha_example::write_row");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@ -274,7 +274,7 @@ int ha_example::update_row(const byte * old_data, byte * new_data)
{
DBUG_ENTER("ha_example::update_row");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@ -282,8 +282,8 @@ int ha_example::update_row(const byte * old_data, byte * new_data)
This will delete a row. buf will contain a copy of the row to be deleted.
The server will call this right after the current row has been called (from
either a previous rnd_nexT() or index call).
If you keep a pointer to the last row or can access a primary key it will
make doing the deletion quite a bit easier.
If you keep a pointer to the last row or can access a primary key it will
make doing the deletion quite a bit easier.
Keep in mind that the server does no guarentee consecutive deletions. ORDER BY
clauses can be used.
@ -294,7 +294,7 @@ int ha_example::update_row(const byte * old_data, byte * new_data)
int ha_example::delete_row(const byte * buf)
{
DBUG_ENTER("ha_example::delete_row");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@ -309,7 +309,7 @@ int ha_example::index_read(byte * buf, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_example::index_read");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@ -323,7 +323,7 @@ int ha_example::index_read_idx(byte * buf, uint index, const byte * key,
__attribute__((unused)))
{
DBUG_ENTER("ha_example::index_read_idx");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@ -333,7 +333,7 @@ int ha_example::index_read_idx(byte * buf, uint index, const byte * key,
int ha_example::index_next(byte * buf)
{
DBUG_ENTER("ha_example::index_next");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@ -343,40 +343,40 @@ int ha_example::index_next(byte * buf)
int ha_example::index_prev(byte * buf)
{
DBUG_ENTER("ha_example::index_prev");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/*
index_first() asks for the first key in the index.
Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
and sql_select.cc.
*/
int ha_example::index_first(byte * buf)
{
DBUG_ENTER("ha_example::index_first");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/*
index_last() asks for the last key in the index.
Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
and sql_select.cc.
*/
int ha_example::index_last(byte * buf)
{
DBUG_ENTER("ha_example::index_last");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/*
/*
rnd_init() is called when the system wants the storage engine to do a table
scan.
See the example in the introduction at the top of this file to see when
scan.
See the example in the introduction at the top of this file to see when
rnd_init() is called.
Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, sql_table.cc,
@ -385,11 +385,16 @@ int ha_example::index_last(byte * buf)
int ha_example::rnd_init(bool scan)
{
DBUG_ENTER("ha_example::rnd_init");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_example::rnd_end()
{
DBUG_ENTER("ha_example::rnd_end");
DBUG_RETURN(0);
}
/*
/*
This is called for each row of the table scan. When you run out of records
you should return HA_ERR_END_OF_FILE. Fill buff up with the row information.
The Field structure for the table is the key to getting data into buf
@ -415,8 +420,8 @@ int ha_example::rnd_next(byte *buf)
the size needed to store current_position. ref is just a byte array
that the server will maintain. If you are using offsets to mark rows, then
current_position should be the offset. If it is a primary key like in
BDB, then it needs to be a primary key.
BDB, then it needs to be a primary key.
Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
*/
void ha_example::position(const byte *record)
@ -436,7 +441,7 @@ void ha_example::position(const byte *record)
int ha_example::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_example::rnd_pos");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
@ -449,9 +454,9 @@ int ha_example::rnd_pos(byte * buf, byte *pos)
if (records < 2)
records = 2;
The reason is that the server will optimize for cases of only a single
record. If in a table scan you don't know the number of records
record. If in a table scan you don't know the number of records
it will probably be better to set records to two so you can return
as many records as you need.
as many records as you need.
Along with records a few more variables you may wish to set are:
records
deleted
@ -518,9 +523,9 @@ int ha_example::reset(void)
/*
Used to delete all rows in a table. Both for cases of truncate and
for cases where the optimizer realizes that all rows will be
removed as a result of a SQL statement.
removed as a result of a SQL statement.
Called from item_sum.cc by Item_func_group_concat::clear(),
Called from item_sum.cc by Item_func_group_concat::clear(),
Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
Called from sql_delete.cc by mysql_delete().
Called from sql_select.cc by JOIN::reinit().
@ -529,12 +534,12 @@ int ha_example::reset(void)
int ha_example::delete_all_rows()
{
DBUG_ENTER("ha_example::delete_all_rows");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/*
First you should go read the section "locking functions for mysql" in
/*
First you should go read the section "locking functions for mysql" in
lock.cc to understand this.
This create a lock on the table. If you are implementing a storage engine
that can handle transacations look at ha_berkely.cc to see how you will
@ -564,7 +569,7 @@ int ha_example::external_lock(THD *thd, int lock_type)
lock (if we don't want to use MySQL table locks at all) or add locks
for many tables (like we do when we are using a MERGE handler).
Berkeley DB for example changes all WRITE locks to TL_WRITE_ALLOW_WRITE
Berkeley DB for example changes all WRITE locks to TL_WRITE_ALLOW_WRITE
(which signals that we are doing WRITES, but we are still allowing other
reader's and writer's.
@ -591,9 +596,9 @@ THR_LOCK_DATA **ha_example::store_lock(THD *thd,
}
/*
Used to delete a table. By the time delete_table() has been called all
Used to delete a table. By the time delete_table() has been called all
opened references to this table will have been closed (and your globally
shared references released. The variable name will just be the name of
shared references released. The variable name will just be the name of
the table. You will need to remove any files you have created at this point.
If you do not implement this, the default delete_table() is called from
@ -623,10 +628,10 @@ int ha_example::delete_table(const char *name)
int ha_example::rename_table(const char * from, const char * to)
{
DBUG_ENTER("ha_example::rename_table ");
DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/*
/*
Given a starting key, and an ending key estimate the number of rows that
will exist between the two. end_key may be empty which in case determine
if start_key matches any rows.
@ -644,14 +649,14 @@ ha_rows ha_example::records_in_range(uint inx, key_range *min_key,
/*
create() is called to create a database. The variable name will have the name
of the table. When create() is called you do not need to worry about opening
the table. Also, the FRM file will have already been created so adjusting
the table. Also, the FRM file will have already been created so adjusting
create_info will not do you any good. You can overwrite the frm file at this
point if you wish to change the table definition, but there are no methods
point if you wish to change the table definition, but there are no methods
currently provided for doing that.
Called from handle.cc by ha_create_table().
*/
int ha_example::create(const char *name, TABLE *table_arg,
int ha_example::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
DBUG_ENTER("ha_example::create");

View file

@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
/*
Please read ha_exmple.cc before reading this file.
Please keep in mind that the example storage engine implements all methods
that are required to be implemented. handler.h has a full list of methods
@ -48,55 +48,68 @@ public:
ha_example(TABLE *table): handler(table)
{
}
~ha_example()
~ha_example()
{
}
/* The name that will be used for display purposes */
const char *table_type() const { return "EXAMPLE"; }
/* The name of the index type that will be used for display */
const char *index_type(uint inx) { return "NONE"; }
const char *table_type() const { return "EXAMPLE"; }
/*
The name of the index type that will be used for display
don't implement this method unless you really have indexes
*/
const char *index_type(uint inx) { return "HASH"; }
const char **bas_ext() const;
/*
This is a list of flags that says what the storage engine
/*
This is a list of flags that says what the storage engine
implements. The current table flags are documented in
table_flags.
handler.h
*/
ulong table_flags() const
{
return 0;
}
/*
This is a list of flags that says how the storage engine
/*
This is a list of flags that says how the storage engine
implements indexes. The current index flags are documented in
handler.h. If you do not implement indexes, just return zero
handler.h. If you do not implement indexes, just return zero
here.
*/
ulong index_flags(uint inx) const
ulong index_flags(uint inx, uint part) const
{
return 0;
}
/*
/*
unireg.cc will call the following to make sure that the storage engine can
handle the data it is about to send.
Return *real* limits of your storage engine here. MySQL will do
min(your_limits, MySQL_limits) automatically
There is no need to implement ..._key_... methods if you don't support
indexes.
*/
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return 0; }
uint max_key_parts() const { return 0; }
uint max_key_length() const { return 0; }
uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_supported_keys() const { return 0; }
uint max_supported_key_parts() const { return 0; }
uint max_supported_key_length() const { return 0; }
/*
Called in test_quick_select to determine if indexes should be used.
*/
virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
/*
/*
The next method will never be called if you do not implement indexes.
*/
virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
/*
/*
Everything below are methods that we implment in ha_example.cc.
Most of these methods are not obligatory, skip them and
MySQL will treat them as not implemented
*/
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int open(const char *name, int mode, uint test_if_locked); // required
int close(void); // required
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
@ -108,21 +121,32 @@ public:
int index_prev(byte * buf);
int index_first(byte * buf);
int index_last(byte * buf);
int rnd_init(bool scan=1);
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
void info(uint);
/*
unlike index_init(), rnd_init() can be called two times
without rnd_end() in between (it only makes sense if scan=1).
then the second call should prepare for the new table scan
(e.g if rnd_init allocates the cursor, second call should
position it to the start of the table, no need to deallocate
and allocate it again
*/
int rnd_init(bool scan); //required
int rnd_end();
int rnd_next(byte *buf); //required
int rnd_pos(byte * buf, byte *pos); //required
void position(const byte *record); //required
void info(uint); //required
int extra(enum ha_extra_function operation);
int reset(void);
int external_lock(THD *thd, int lock_type);
int external_lock(THD *thd, int lock_type); //required
int delete_all_rows(void);
ha_rows records_in_range(uint inx, key_range *min_key,
key_range *max_key);
int delete_table(const char *from);
int rename_table(const char * from, const char * to);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
int create(const char *name, TABLE *form,
HA_CREATE_INFO *create_info); //required
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
enum thr_lock_type lock_type); //required
};
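To make the "MySQL will do min(your_limits, MySQL_limits) automatically" remark above concrete, here is a hedged C++ sketch of that clamping idea; the function name and the server-side cap are illustrative assumptions, not the actual server code:

```cpp
#include <algorithm>
#include <cstdio>

// Assumed server-wide ceiling, for illustration only.
static const unsigned int SERVER_MAX_KEY_LENGTH = 3500;

// The engine reports its own raw limit (ha_example above returns 0 for the
// key methods); the effective limit is the smaller of the two values.
unsigned int effective_max_key_length(unsigned int engine_limit) {
  return std::min(engine_limit, SERVER_MAX_KEY_LENGTH);
}

int main() {
  std::printf("%u\n", effective_max_key_length(10000)); // clamped to 3500
  std::printf("%u\n", effective_max_key_length(0));     // engine has no keys
  return 0;
}
```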

View file

@ -433,7 +433,7 @@ void Field::store_time(TIME *ltime,timestamp_type type)
bool Field::optimize_range(uint idx)
{
return !test(table->file->index_flags(idx) & HA_WRONG_ASCII_ORDER);
return test(table->file->index_flags(idx) & HA_READ_RANGE);
}
/****************************************************************************

View file

@ -330,7 +330,7 @@ static BUFFPEK *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count)
{
my_free((char*) tmp, MYF(0));
tmp=0;
}
}
}
DBUG_RETURN(tmp);
}
@ -373,7 +373,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
if (sort_form->key_read) // QQ Can be removed after the reset
file->extra(HA_EXTRA_KEYREAD); // QQ is removed
next_pos=(byte*) 0; /* Find records in sequence */
file->rnd_init();
file->ha_rnd_init();
file->extra_opt(HA_EXTRA_CACHE,
current_thd->variables.read_buff_size);
}
@ -415,7 +415,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
{
DBUG_PRINT("info",("Sort killed by user"));
(void) file->extra(HA_EXTRA_NO_CACHE);
file->rnd_end();
file->ha_rnd_end();
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
}
if (error == 0)
@ -435,7 +435,8 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
file->unlock_row();
}
(void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */
file->rnd_end();
if (!next_pos)
file->ha_rnd_end();
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
if (error != HA_ERR_END_OF_FILE)
{

View file

@ -442,7 +442,6 @@ berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length)
return 0; // Identical keys
}
int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
{
char name_buff[FN_REFLEN];
@ -1350,6 +1349,7 @@ int ha_berkeley::index_end()
error=cursor->c_close(cursor);
cursor=0;
}
active_index=MAX_KEY;
DBUG_RETURN(error);
}
@ -1411,7 +1411,7 @@ int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key,
statistic_increment(ha_read_key_count,&LOCK_status);
DBUG_ENTER("index_read_idx");
current_row.flags=DB_DBT_REALLOC;
active_index= (uint) -1;
active_index=MAX_KEY;
DBUG_RETURN(read_row(key_file[keynr]->get(key_file[keynr], transaction,
pack_key(&last_key, keynr, key_buff, key,
key_len),
@ -1482,7 +1482,7 @@ int ha_berkeley::index_read(byte * buf, const byte * key,
bzero((char*) &row, sizeof(row));
error= read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV),
(char*) buf, active_index, &row, &last_key, 1);
}
}
DBUG_RETURN(error);
}
@ -1583,12 +1583,14 @@ int ha_berkeley::index_last(byte * buf)
int ha_berkeley::rnd_init(bool scan)
{
DBUG_ENTER("rnd_init");
//DBUG_ASSERT(active_index==MAX_KEY);
current_row.flags=DB_DBT_REALLOC;
DBUG_RETURN(index_init(primary_key));
}
int ha_berkeley::rnd_end()
{
active_index= MAX_KEY;
return index_end();
}
@ -1630,7 +1632,7 @@ int ha_berkeley::rnd_pos(byte * buf, byte *pos)
statistic_increment(ha_read_rnd_count,&LOCK_status);
DBUG_ENTER("ha_berkeley::rnd_pos");
active_index= (uint) -1; // Don't delete via cursor
active_index= MAX_KEY;
DBUG_RETURN(read_row(file->get(file, transaction,
get_pos(&db_pos, pos),
&current_row, 0),

View file

@ -87,24 +87,25 @@ class ha_berkeley: public handler
public:
ha_berkeley(TABLE *table): handler(table), alloc_ptr(0),rec_buff(0), file(0),
int_table_flags(HA_REC_NOT_IN_SEQ |
HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER | HA_FAST_KEY_READ |
HA_NULL_KEY | HA_BLOB_KEY | HA_NOT_EXACT_COUNT |
HA_PRIMARY_KEY_IN_READ_INDEX | HA_DROP_BEFORE_CREATE |
HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX |
HA_KEY_READ_WRONG_STR | HA_FILE_BASED),
changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0)
{
}
int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0) {}
~ha_berkeley() {}
const char *table_type() const { return "BerkeleyDB"; }
ulong ha_berkeley::index_flags(uint idx, uint part) const
{
ulong flags=HA_READ_NEXT | HA_READ_PREV;
if (part == (uint)~0 ||
table->key_info[idx].key_part[part].field->key_type() != HA_KEYTYPE_TEXT)
flags|= HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE;
return flags;
}
const char *index_type(uint key_number) { return "BTREE"; }
const char **bas_ext() const;
ulong table_flags(void) const { return int_table_flags; }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return MAX_KEY-1; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return MAX_KEY_LENGTH; }
uint max_supported_keys() const { return MAX_KEY-1; }
uint extra_rec_buf_length() { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
ha_rows estimate_number_of_rows();
const key_map *keys_to_use_for_scanning() { return &key_map_full; }

View file

@ -111,6 +111,7 @@ int ha_heap::delete_row(const byte * buf)
int ha_heap::index_read(byte * buf, const byte * key, uint key_len,
enum ha_rkey_function find_flag)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_key_count, &LOCK_status);
int error = heap_rkey(file,buf,active_index, key, key_len, find_flag);
table->status = error ? STATUS_NOT_FOUND : 0;
@ -119,6 +120,7 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len,
int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_key_count, &LOCK_status);
int error= heap_rkey(file, buf, active_index, key, key_len,
HA_READ_PREFIX_LAST);
@ -137,6 +139,7 @@ int ha_heap::index_read_idx(byte * buf, uint index, const byte * key,
int ha_heap::index_next(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_next_count,&LOCK_status);
int error=heap_rnext(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -145,6 +148,7 @@ int ha_heap::index_next(byte * buf)
int ha_heap::index_prev(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_prev_count,&LOCK_status);
int error=heap_rprev(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -153,6 +157,7 @@ int ha_heap::index_prev(byte * buf)
int ha_heap::index_first(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_first_count,&LOCK_status);
int error=heap_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -161,6 +166,7 @@ int ha_heap::index_first(byte * buf)
int ha_heap::index_last(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_last_count,&LOCK_status);
int error=heap_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
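The DBUG_ASSERT(inited==INDEX) lines added above, together with the switch from rnd_init()/rnd_end() to the ha_rnd_init()/ha_rnd_end() and ha_index_init()/ha_index_end() wrappers seen in earlier hunks, suggest a base class that tracks which kind of scan is currently open. A minimal C++ sketch of that pattern, with purely illustrative names (not the real handler class):

```cpp
#include <cassert>

class toy_handler {
public:
  enum scan_state { NONE, INDEX, RND };

  // The wrappers record the scan state so per-method assertions can catch misuse.
  int ha_index_init(unsigned int idx) { assert(inited == NONE);  inited = INDEX; return index_init(idx); }
  int ha_index_end()                  { assert(inited == INDEX); inited = NONE;  return index_end(); }
  int ha_rnd_init(bool scan)          { assert(inited == NONE);  inited = RND;   return rnd_init(scan); }
  int ha_rnd_end()                    { assert(inited == RND);   inited = NONE;  return rnd_end(); }

  // An index read is only legal while an index scan is open.
  int index_next() { assert(inited == INDEX); return 0; }

private:
  scan_state inited = NONE;
  virtual int index_init(unsigned int) { return 0; }
  virtual int index_end()              { return 0; }
  virtual int rnd_init(bool)           { return 0; }
  virtual int rnd_end()                { return 0; }
};

int main() {
  toy_handler h;
  h.ha_index_init(0);
  h.index_next();     // fine: an index scan is open
  h.ha_index_end();
  // h.index_next(); here would trip the assertion, like DBUG_ASSERT(inited==INDEX)
  return 0;
}
```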

View file

@ -40,21 +40,18 @@ class ha_heap: public handler
const char **bas_ext() const;
ulong table_flags() const
{
return (HA_READ_RND_SAME | HA_FAST_KEY_READ | HA_KEYPOS_TO_RNDPOS |
HA_NO_BLOBS | HA_NULL_KEY | HA_REC_NOT_IN_SEQ);
return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME |
HA_CAN_INSERT_DELAYED);
}
ulong index_flags(uint inx) const
ulong index_flags(uint inx, uint part) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER) :
(HA_ONLY_WHOLE_INDEX | HA_WRONG_ASCII_ORDER |
HA_NOT_READ_PREFIX_LAST));
HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE :
HA_ONLY_WHOLE_INDEX);
}
const key_map *keys_to_use_for_scanning() { return &btree_keys; }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return HA_MAX_REC_LENGTH; }
uint max_supported_keys() const { return MAX_KEY; }
double scan_time() { return (double) (records+deleted) / 20.0+10; }
double read_time(uint index, uint ranges, ha_rows rows)
{ return (double) rows / 20.0+1; }

View file

@ -407,7 +407,7 @@ innobase_mysql_print_thd(
May 14, 2004 probably no race any more,
but better be safe */
}
/* Use strmake to reduce the timeframe
for a race, compared to fwrite() */
i= (uint) (strmake(buf, s, len) - buf);
@ -1436,9 +1436,6 @@ ha_innobase::open(
last_query_id = (ulong)-1;
active_index = 0;
active_index_before_scan = (uint)-1; /* undefined value */
if (!(share=get_share(name))) {
DBUG_RETURN(1);
@ -1582,15 +1579,6 @@ ha_innobase::open(
DBUG_RETURN(0);
}
/*********************************************************************
Does nothing. */
void
ha_innobase::initialize(void)
/*=========================*/
{
}
/**********************************************************************
Closes a handle to an InnoDB table. */
@ -2660,7 +2648,7 @@ ha_innobase::index_end(void)
{
int error = 0;
DBUG_ENTER("index_end");
active_index=MAX_KEY;
DBUG_RETURN(error);
}
@ -3131,8 +3119,6 @@ ha_innobase::rnd_init(
/* Store the active index value so that we can restore the original
value after a scan */
active_index_before_scan = active_index;
if (prebuilt->clust_index_was_generated) {
err = change_active_index(MAX_KEY);
} else {
@ -3152,19 +3138,7 @@ ha_innobase::rnd_end(void)
/*======================*/
/* out: 0 or error number */
{
/* Restore the old active_index back; MySQL may assume that a table
scan does not change active_index. We only restore the value if
MySQL has called rnd_init before: sometimes MySQL seems to call
rnd_end WITHOUT calling rnd_init. */
if (active_index_before_scan != (uint)-1) {
change_active_index(active_index_before_scan);
active_index_before_scan = (uint)-1;
}
return(index_end());
return(index_end());
}
/*********************************************************************
@ -5213,4 +5187,19 @@ innobase_store_binlog_offset_and_flush_log(
/* Syncronous flush of the log buffer to disk */
log_buffer_flush_to_disk();
}
char *ha_innobase::get_mysql_bin_log_name()
{
return trx_sys_mysql_bin_log_name;
}
ulonglong ha_innobase::get_mysql_bin_log_pos()
{
/*
trx... is ib_longlong, which is a typedef for a 64-bit integer (__int64 or
longlong) so it's ok to cast it to ulonglong.
*/
return trx_sys_mysql_bin_log_pos;
}
#endif /* HAVE_INNOBASE_DB */

View file

@ -61,20 +61,11 @@ class ha_innobase: public handler
ulong start_of_scan; /* this is set to 1 when we are
starting a table scan but have not
yet fetched any row, else 0 */
uint active_index_before_scan;
/* since a table scan in InnoDB is
always done through an index, a table
scan may change active_index; but
MySQL may assume that active_index
after a table scan is the same as
before; we store the value here so
that we can restore the value after
a scan */
uint last_match_mode;/* match mode of the latest search:
ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX,
or undefined */
longlong auto_inc_counter_for_this_stat;
ulong max_row_length(const byte *buf);
ulong max_supported_row_length(const byte *buf);
uint store_key_val_for_row(uint keynr, char* buff, uint buff_len,
const byte* record);
@ -87,13 +78,10 @@ class ha_innobase: public handler
public:
ha_innobase(TABLE *table): handler(table),
int_table_flags(HA_REC_NOT_IN_SEQ |
HA_KEYPOS_TO_RNDPOS |
HA_LASTKEY_ORDER |
HA_NULL_KEY | HA_FAST_KEY_READ |
HA_BLOB_KEY |
HA_NULL_IN_KEY | HA_FAST_KEY_READ |
HA_CAN_INDEX_BLOBS |
HA_CAN_SQL_HANDLER |
HA_NOT_EXACT_COUNT |
HA_NO_WRITE_DELAYED |
HA_PRIMARY_KEY_IN_READ_INDEX |
HA_TABLE_SCAN_ON_INDEX),
last_dup_key((uint) -1),
@ -106,14 +94,12 @@ class ha_innobase: public handler
const char *index_type(uint key_number) { return "BTREE"; }
const char** bas_ext() const;
ulong table_flags() const { return int_table_flags; }
ulong index_flags(uint idx) const
ulong index_flags(uint idx, uint part) const
{
return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER |
HA_KEY_READ_ONLY);
return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE |
HA_KEYREAD_ONLY);
}
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_supported_keys() const { return MAX_KEY; }
/* An InnoDB page must store >= 2 keys;
a secondary key record must also contain the
primary key value:
@ -121,15 +107,12 @@ class ha_innobase: public handler
less than 1 / 4 of page size which is 16 kB;
but currently MySQL does not work with keys
whose size is > MAX_KEY_LENGTH */
uint max_key_length() const { return((MAX_KEY_LENGTH <= 3500) ?
MAX_KEY_LENGTH : 3500);}
uint max_key_part_length() { return((MAX_KEY_LENGTH <= 3500) ?
MAX_KEY_LENGTH : 3500);}
uint max_supported_key_length() const { return 3500; }
uint max_supported_key_part_length() const { return 3500; }
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
bool has_transactions() { return 1;}
int open(const char *name, int mode, uint test_if_locked);
void initialize(void);
int close(void);
double scan_time();
double read_time(uint index, uint ranges, ha_rows rows);
@ -183,6 +166,9 @@ class ha_innobase: public handler
void init_table_handle_for_HANDLER();
longlong get_auto_increment();
uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; }
static char *get_mysql_bin_log_name();
static ulonglong get_mysql_bin_log_pos();
};
extern uint innobase_init_flags, innobase_lock_type;

View file

@ -32,19 +32,20 @@ class ha_isam: public handler
public:
ha_isam(TABLE *table)
:handler(table), file(0),
int_table_flags(HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER |
HA_KEY_READ_WRONG_STR | HA_DUPP_POS |
HA_NOT_DELETE_WITH_CACHE | HA_FILE_BASED)
int_table_flags(HA_READ_RND_SAME |
HA_DUPP_POS | HA_NOT_DELETE_WITH_CACHE | HA_FILE_BASED)
{}
~ha_isam() {}
ulong index_flags(uint idx, uint part) const
{ return HA_READ_NEXT; } // but no HA_READ_PREV here!!!
const char *table_type() const { return "ISAM"; }
const char *index_type(uint key_number) { return "BTREE"; }
const char **bas_ext() const;
ulong table_flags() const { return int_table_flags; }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return N_MAXKEY; }
uint max_key_parts() const { return N_MAXKEY_SEG; }
uint max_key_length() const { return N_MAX_KEY_LENGTH; }
uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_supported_keys() const { return N_MAXKEY; }
uint max_supported_key_parts() const { return N_MAXKEY_SEG; }
uint max_supported_key_length() const { return N_MAX_KEY_LENGTH; }
uint min_record_length(uint options) const;
bool low_byte_first() const { return 0; }
@ -66,7 +67,6 @@ class ha_isam: public handler
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
my_off_t row_position() { return nisam_position(file); }
void info(uint);
int extra(enum ha_extra_function operation);
int external_lock(THD *thd, int lock_type);

View file

@ -32,14 +32,11 @@ class ha_isammrg: public handler
~ha_isammrg() {}
const char *table_type() const { return "MRG_ISAM"; }
const char **bas_ext() const;
ulong table_flags() const { return (HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS |
ulong table_flags() const { return (HA_READ_RND_SAME |
HA_REC_NOT_IN_SEQ | HA_FILE_BASED); }
ulong index_flags(uint idx) const { return HA_NOT_READ_PREFIX_LAST; }
ulong index_flags(uint idx, uint part) const { DBUG_ASSERT(0); return 0; }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return 0; }
uint max_key_parts() const { return 0; }
uint max_key_length() const { return 0; }
uint max_supported_keys() const { return 0; }
bool low_byte_first() const { return 0; }
uint min_record_length(uint options) const;
@ -60,7 +57,6 @@ class ha_isammrg: public handler
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
my_off_t row_position() { return mrg_position(file); }
void info(uint);
int extra(enum ha_extra_function operation);
int external_lock(THD *thd, int lock_type);

View file

@ -1063,6 +1063,7 @@ int ha_myisam::delete_row(const byte * buf)
int ha_myisam::index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_key_count,&LOCK_status);
int error=mi_rkey(file,buf,active_index, key, key_len, find_flag);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -1080,6 +1081,7 @@ int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key,
int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_key_count,&LOCK_status);
int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -1088,6 +1090,7 @@ int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len)
int ha_myisam::index_next(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_next_count,&LOCK_status);
int error=mi_rnext(file,buf,active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -1096,6 +1099,7 @@ int ha_myisam::index_next(byte * buf)
int ha_myisam::index_prev(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_prev_count,&LOCK_status);
int error=mi_rprev(file,buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -1104,6 +1108,7 @@ int ha_myisam::index_prev(byte * buf)
int ha_myisam::index_first(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_first_count,&LOCK_status);
int error=mi_rfirst(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -1112,6 +1117,7 @@ int ha_myisam::index_first(byte * buf)
int ha_myisam::index_last(byte * buf)
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_last_count,&LOCK_status);
int error=mi_rlast(file, buf, active_index);
table->status=error ? STATUS_NOT_FOUND: 0;
@ -1122,6 +1128,7 @@ int ha_myisam::index_next_same(byte * buf,
const byte *key __attribute__((unused)),
uint length __attribute__((unused)))
{
DBUG_ASSERT(inited==INDEX);
statistic_increment(ha_read_next_count,&LOCK_status);
int error=mi_rnext_same(file,buf);
table->status=error ? STATUS_NOT_FOUND: 0;

View file

@ -44,10 +44,10 @@ class ha_myisam: public handler
public:
ha_myisam(TABLE *table): handler(table), file(0),
int_table_flags(HA_READ_RND_SAME | HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER |
HA_NULL_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
HA_DUPP_POS | HA_BLOB_KEY | HA_AUTO_PART_KEY |
HA_FILE_BASED | HA_HAS_GEOMETRY),
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
HA_CAN_INSERT_DELAYED),
can_enable_indexes(1)
{}
~ha_myisam() {}
@ -55,17 +55,15 @@ class ha_myisam: public handler
const char *index_type(uint key_number);
const char **bas_ext() const;
ulong table_flags() const { return int_table_flags; }
ulong index_flags(uint inx) const
ulong index_flags(uint inx, uint part) const
{
ulong flags=(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER);
return (flags | ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_KEY_READ_ONLY));
return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return MI_MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return MI_MAX_KEY_LENGTH; }
uint max_key_part_length() { return MI_MAX_KEY_LENGTH; }
uint max_supported_keys() const { return MI_MAX_KEY; }
uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; }
uint max_supported_key_part_length() { return MI_MAX_KEY_LENGTH; }
uint checksum() const;
int open(const char *name, int mode, uint test_if_locked);
@ -83,7 +81,7 @@ class ha_myisam: public handler
int index_first(byte * buf);
int index_last(byte * buf);
int index_next_same(byte *buf, const byte *key, uint keylen);
int index_end() { ft_handler=NULL; return 0; }
int index_end() { ft_handler=NULL; return handler::index_end(); }
int ft_init()
{
if (!ft_handler)
@ -99,7 +97,6 @@ class ha_myisam: public handler
int rnd_pos(byte * buf, byte *pos);
int restart_rnd_next(byte *buf, byte *pos);
void position(const byte *record);
my_off_t row_position() { return mi_position(file); }
void info(uint);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);

View file

@ -34,21 +34,20 @@ class ha_myisammrg: public handler
const char **bas_ext() const;
ulong table_flags() const
{
return (HA_REC_NOT_IN_SEQ | HA_READ_RND_SAME | HA_AUTO_PART_KEY |
HA_KEYPOS_TO_RNDPOS | HA_LASTKEY_ORDER |
HA_NULL_KEY | HA_BLOB_KEY | HA_FILE_BASED);
return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME |
HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED |
HA_CAN_INSERT_DELAYED);
}
ulong index_flags(uint inx) const
ulong index_flags(uint inx, uint part) const
{
ulong flags=(HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER);
return (flags | ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_KEY_READ_ONLY));
return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return MI_MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return MAX_KEY_LENGTH; }
virtual double scan_time()
uint max_supported_keys() const { return MI_MAX_KEY; }
uint max_supported_key_length() const { return MI_MAX_KEY_LENGTH; }
uint max_supported_key_part_length() { return MI_MAX_KEY_LENGTH; }
double scan_time()
{ return ulonglong2double(data_file_length) / IO_SIZE + file->tables; }
int open(const char *name, int mode, uint test_if_locked);
@ -71,7 +70,6 @@ class ha_myisammrg: public handler
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
my_off_t row_position() { return myrg_position(file); }
void info(uint);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);

View file

@ -40,8 +40,6 @@
#endif
#ifdef HAVE_INNOBASE_DB
#include "ha_innodb.h"
#else
#define innobase_query_caching_of_table_permitted(X,Y,Z) 1
#endif
#ifdef HAVE_NDBCLUSTER_DB
#include "ha_ndbcluster.h"
@ -219,6 +217,18 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
}
}
bool ha_caching_allowed(THD* thd, char* table_key,
uint key_length, uint8 cache_type)
{
#ifdef HAVE_INNOBASE_DB
if (cache_type == HA_CACHE_TBL_ASKTRANSACT)
return innobase_query_caching_of_table_permitted(thd, table_key,
key_length);
else
#endif
return 1;
}
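The free function above replaces the old static handler::caching_allowed(): the query cache only consults the transactional engine when the table's cache type says "ask the transaction". A simplified, self-contained sketch of that dispatch; the enum values and the engine callback are invented for illustration:

#include <cstdio>

enum TableCacheType { CACHE_TBL_NONTRANSACT, CACHE_TBL_ASKTRANSACT };

// Stand-in for the engine callback that checks transaction visibility;
// the real server asks InnoDB here.
static bool engine_allows_caching(const char* table_key) {
  return table_key[0] != 'x';          // arbitrary rule for the demo
}

// Only tables of the "ask the transaction" type need the engine's opinion;
// everything else is cacheable by default.
static bool caching_allowed(const char* table_key, TableCacheType type) {
  if (type == CACHE_TBL_ASKTRANSACT)
    return engine_allows_caching(table_key);
  return true;
}

int main() {
  std::printf("%d %d %d\n",
              caching_allowed("t1", CACHE_TBL_NONTRANSACT),
              caching_allowed("t1", CACHE_TBL_ASKTRANSACT),
              caching_allowed("x1", CACHE_TBL_ASKTRANSACT));
  return 0;
}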
int ha_init()
{
int error= 0;
@ -866,46 +876,6 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
DBUG_RETURN(error);
}
int handler::check(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
}
int handler::backup(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
}
int handler::restore(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
}
int handler::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
}
int handler::optimize(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
}
int handler::analyze(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
}
int handler::assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
}
int handler::preload_keys(THD* thd, HA_CHECK_OPT* check_opt)
{
return HA_ADMIN_NOT_IMPLEMENTED;
}
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
@ -923,35 +893,23 @@ int handler::read_first_row(byte * buf, uint primary_key)
If there is very few deleted rows in the table, find the first row by
scanning the table.
*/
if (deleted < 10 || primary_key >= MAX_KEY ||
!(index_flags(primary_key) & HA_READ_ORDER))
if (deleted < 10 || primary_key >= MAX_KEY)
{
(void) rnd_init();
(void) ha_rnd_init(1);
while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
(void) rnd_end();
(void) ha_rnd_end();
}
else
{
/* Find the first row through the primary key */
(void) index_init(primary_key);
(void) ha_index_init(primary_key);
error=index_first(buf);
(void) index_end();
(void) ha_index_end();
}
DBUG_RETURN(error);
}
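The branch above encodes a small heuristic: with few deleted rows, or no usable primary key, it is cheaper to scan past deleted records than to open an index; otherwise the primary key is used to position on the first row. A toy, self-contained version of the same decision, with the scan and index paths reduced to dummies:

#include <cstdio>
#include <vector>

struct Row { int id; bool deleted; };

// Sequential path: skip deleted rows until the first live one.
static const Row* first_by_scan(const std::vector<Row>& t) {
  for (const Row& r : t)
    if (!r.deleted) return &r;
  return nullptr;
}

// Index path: stands in for index_first() on the primary key; in this demo
// the table is already ordered by id, so the scan doubles as the index read.
static const Row* first_by_index(const std::vector<Row>& t) {
  return first_by_scan(t);
}

static const Row* read_first_row(const std::vector<Row>& t,
                                 size_t deleted, bool has_primary_key) {
  // Few deleted rows, or no primary key: a plain scan is good enough.
  if (deleted < 10 || !has_primary_key)
    return first_by_scan(t);
  // Otherwise position with the primary key.
  return first_by_index(t);
}

int main() {
  std::vector<Row> t = {{1, true}, {2, false}, {3, false}};
  if (const Row* r = read_first_row(t, /*deleted=*/1, /*has_primary_key=*/true))
    std::printf("first live row: %d\n", r->id);
  return 0;
}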
/*
The following function is only needed for tables that may be temporary tables
during joins
*/
int handler::restart_rnd_next(byte *buf, byte *pos)
{
return HA_ERR_WRONG_COMMAND;
}
/* Set a timestamp in record */
void handler::update_timestamp(byte *record)
@ -1166,7 +1124,7 @@ void handler::print_error(int error, myf errflag)
bool handler::get_error_message(int error, String* buf)
{
return false;
return FALSE;
}
@ -1235,28 +1193,6 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
}
/*
This is called to delete all rows in a table
If the handler don't support this, then this function will
return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
by one.
*/
int handler::delete_all_rows()
{
return (my_errno=HA_ERR_WRONG_COMMAND);
}
bool handler::caching_allowed(THD* thd, char* table_key,
uint key_length, uint8 cache_type)
{
if (cache_type == HA_CACHE_TBL_ASKTRANSACT)
return innobase_query_caching_of_table_permitted(thd, table_key,
key_length);
else
return 1;
}
/****************************************************************************
** Some general functions that aren't in the handler class
****************************************************************************/
@ -1279,8 +1215,6 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
if (update_create_info)
{
update_create_info_from_table(create_info, &table);
if (table.file->table_flags() & HA_DROP_BEFORE_CREATE)
table.file->delete_table(name);
}
if (lower_case_table_names == 2 &&
!(table.file->table_flags() & HA_FILE_BASED))
@ -1537,3 +1471,15 @@ int handler::compare_key(key_range *range)
cmp= key_compare_result_on_equal;
return cmp;
}
int handler::index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
int error= ha_index_init(index);
if (!error)
error= index_read(buf, key, key_len, find_flag);
if (!error)
error= ha_index_end();
return error;
}

View file

@ -46,51 +46,42 @@
#define HA_ADMIN_TRY_ALTER -7
/* Bits in table_flags() to show what database can do */
#define HA_READ_RND_SAME 1 /* Read RND-record to KEY-record
(To update with RND-read) */
#define HA_KEYPOS_TO_RNDPOS 2 /* ha_info gives pos to record */
#define HA_TABLE_SCAN_ON_INDEX 4 /* No separate data/index file */
#define HA_REC_NOT_IN_SEQ 8 /* ha_info don't return recnumber;
#define HA_READ_RND_SAME (1 << 0) /* can switch index during the scan
with ::rnd_same() - not used yet.
see mi_rsame/heap_rsame/myrg_rsame */
#define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */
#define HA_REC_NOT_IN_SEQ (1 << 3) /* ha_info don't return recnumber;
It returns a position to ha_r_rnd */
#define HA_HAS_GEOMETRY (1 << 4)
#define HA_CAN_GEOMETRY (1 << 4)
#define HA_FAST_KEY_READ (1 << 5) /* no need for a record cache in filesort */
#define HA_KEY_READ_WRONG_STR (1 << 6) /* keyread returns converted strings */
#define HA_NULL_KEY (1 << 7) /* One can have keys with NULL */
#define HA_DUPP_POS (1 << 8) /* ha_position() gives dupp row */
#define HA_NULL_IN_KEY (1 << 7) /* One can have keys with NULL */
#define HA_DUPP_POS (1 << 8) /* ha_position() gives dup row */
#define HA_NO_BLOBS (1 << 9) /* Doesn't support blobs */
#define HA_BLOB_KEY (1 << 10) /* key on blob */
#define HA_AUTO_PART_KEY (1 << 11)
#define HA_REQUIRE_PRIMARY_KEY (1 << 12)
#define HA_CAN_INDEX_BLOBS (1 << 10)
#define HA_AUTO_PART_KEY (1 << 11) /* auto-increment in multi-part key */
#define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. and can't create a hidden one */
#define HA_NOT_EXACT_COUNT (1 << 13)
#define HA_NO_WRITE_DELAYED (1 << 14)
#define HA_CAN_INSERT_DELAYED (1 << 14) /* only handlers with table-level locks
need no special code to support
INSERT DELAYED */
#define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15)
#define HA_DROP_BEFORE_CREATE (1 << 16)
#define HA_NOT_READ_AFTER_KEY (1 << 17)
#define HA_NOT_DELETE_WITH_CACHE (1 << 18)
#define HA_NO_TEMP_TABLES (1 << 19)
#define HA_NO_PREFIX_CHAR_KEYS (1 << 20)
#define HA_CAN_FULLTEXT (1 << 21)
#define HA_CAN_SQL_HANDLER (1 << 22)
#define HA_NO_AUTO_INCREMENT (1 << 23)
#define HA_HAS_CHECKSUM (1 << 24)
/*
Next record gives next record according last record read (even
if database is updated after read). Not used at this point.
*/
#define HA_LASTKEY_ORDER (1 << 25)
/* Table data are stored in separate files */
/* Table data are stored in separate files (for lower_case_table_names) */
#define HA_FILE_BASED (1 << 26)
/* bits in index_flags(index_number) for what you can do with index */
#define HA_WRONG_ASCII_ORDER 1 /* Can't use sorting through key */
#define HA_READ_NEXT 2 /* Read next record with same key */
#define HA_READ_PREV 4 /* Read prev. record with same key */
#define HA_READ_ORDER 8 /* Read through record-keys in order */
#define HA_READ_NEXT 1 /* TODO really use this flag */
#define HA_READ_PREV 2 /* supports ::index_prev */
#define HA_READ_ORDER 4 /* index_next/prev follow sort order */
#define HA_READ_RANGE 8 /* can find all records in a range */
#define HA_ONLY_WHOLE_INDEX 16 /* Can't use part key searches */
#define HA_NOT_READ_PREFIX_LAST 32 /* No support for index_read_last() */
#define HA_KEY_READ_ONLY 64 /* Support HA_EXTRA_KEYREAD */
#define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */
/* operations for disable/enable indexes */
#define HA_KEY_SWITCH_NONUNIQ 0
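The table and index capability masks above are built by OR-ing (1 << n) bits and queried with a bitwise AND. A small self-contained example of composing and testing such a mask; the two flag names are illustrative, not the real constants:

#include <cstdio>

// Two illustrative capability bits in the (1 << n) style used above.
static const unsigned long CAP_CAN_GEOMETRY    = 1UL << 4;
static const unsigned long CAP_CAN_INDEX_BLOBS = 1UL << 10;

struct DemoEngine {
  // An engine advertises its capabilities as one OR-ed mask.
  unsigned long table_flags() const {
    return CAP_CAN_GEOMETRY | CAP_CAN_INDEX_BLOBS;
  }
};

int main() {
  DemoEngine e;
  // Callers test a single capability with a bitwise AND.
  if (e.table_flags() & CAP_CAN_INDEX_BLOBS)
    std::puts("engine can index blob columns");
  if (!(e.table_flags() & (1UL << 14)))
    std::puts("bit 14 is not set in this demo mask");
  return 0;
}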
@ -109,9 +100,6 @@
#define HA_DDL_WITH_LOCK 2 /* Can create/drop with locked table */
#define HA_DDL_ONLINE 4 /* Can create/drop without lock */
/* Return value for ddl methods */
#define HA_DDL_NOT_IMPLEMENTED -1
/*
Parameters for open() (in register form->filestat)
HA_GET_INFO does an implicit HA_ABORT_IF_LOCKED
@ -240,6 +228,18 @@ class handler :public Sql_alloc
protected:
struct st_table *table; /* The table definition */
virtual int index_init(uint idx) { active_index=idx; return 0; }
virtual int index_end() { active_index=MAX_KEY; return 0; }
/*
rnd_init() can be called two times without rnd_end() in between
(it only makes sense if scan=1).
then the second call should prepare for the new table scan (e.g
if rnd_init allocates the cursor, second call should position it
to the start of the table, no need to deallocate and allocate it again
*/
virtual int rnd_init(bool scan) =0;
virtual int rnd_end() { return 0; }
public:
byte *ref; /* Pointer to current row */
byte *dupp_ref; /* Pointer to dupp row */
@ -256,6 +256,7 @@ public:
time_t create_time; /* When table was created */
time_t check_time;
time_t update_time;
enum {NONE=0, INDEX, RND} inited;
/* The following are for read_range() */
key_range save_end_range, *end_range;
@ -280,11 +281,11 @@ public:
delete_length(0), auto_increment_value(0),
records(0), deleted(0), mean_rec_length(0),
create_time(0), check_time(0), update_time(0),
key_used_on_scan(MAX_KEY), active_index(MAX_REF_PARTS),
key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
ref_length(sizeof(my_off_t)), block_size(0),
raid_type(0), ft_handler(0), implicit_emptied(0)
raid_type(0), ft_handler(0), implicit_emptied(0), inited(NONE)
{}
virtual ~handler(void) {}
virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
int ha_open(const char *name, int mode, int test_if_locked);
void update_timestamp(byte *record);
void update_auto_increment();
@ -300,88 +301,140 @@ public:
virtual bool has_transactions(){ return 0;}
virtual uint extra_rec_buf_length() { return 0; }
virtual ha_rows estimate_number_of_rows() { return records+EXTRA_RECORDS; }
virtual const char *index_type(uint key_number) { return "";}
virtual int index_init(uint idx) { active_index=idx; return 0;}
virtual int index_end() {return 0; }
virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";}
int ha_index_init(uint idx)
{
DBUG_ASSERT(inited==NONE);
inited=INDEX;
return index_init(idx);
}
int ha_index_end()
{
DBUG_ASSERT(inited==INDEX);
inited=NONE;
return index_end();
}
int ha_rnd_init(bool scan=1)
{
DBUG_ASSERT(inited==NONE || (inited==RND && scan));
inited=RND;
return rnd_init(scan);
}
int ha_rnd_end()
{
DBUG_ASSERT(inited==RND);
inited=NONE;
return rnd_end();
}
/* this is necessary in many places, e.g. in the HANDLER command */
int ha_index_or_rnd_end()
{
return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
}
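The ha_index_init()/ha_rnd_init() wrappers above add a tiny state machine (NONE/INDEX/RND) so a cursor is never initialized twice or ended without being started. A self-contained sketch of the same guard pattern, assuming a hypothetical Cursor class with the engine-specific hooks reduced to no-ops:

#include <cassert>
#include <cstdio>

class Cursor {                       // hypothetical stand-in for a handler
  enum { NONE, INDEX, RND } inited = NONE;

  // Engine-specific hooks (no-ops in this sketch).
  int index_init(unsigned idx) { std::printf("index_init(%u)\n", idx); return 0; }
  int index_end()              { std::puts("index_end()");  return 0; }
  int rnd_init(bool scan)      { std::printf("rnd_init(%d)\n", (int) scan); return 0; }
  int rnd_end()                { std::puts("rnd_end()");    return 0; }

public:
  // Public wrappers enforce the legal transitions with assertions.
  int ha_index_init(unsigned idx) { assert(inited == NONE);  inited = INDEX; return index_init(idx); }
  int ha_index_end()              { assert(inited == INDEX); inited = NONE;  return index_end(); }
  int ha_rnd_init(bool scan = true) {
    // Re-initialization is allowed only when starting a new full scan.
    assert(inited == NONE || (inited == RND && scan));
    inited = RND; return rnd_init(scan);
  }
  int ha_rnd_end()                { assert(inited == RND);   inited = NONE;  return rnd_end(); }

  // Close whichever cursor happens to be open (handy in cleanup paths).
  int ha_index_or_rnd_end() {
    return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
  }
};

int main() {
  Cursor c;
  c.ha_index_init(0);
  c.ha_index_or_rnd_end();   // ends the index cursor
  c.ha_rnd_init();
  c.ha_rnd_end();
  return 0;
}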
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
virtual void initialize(void) {}
virtual int close(void)=0;
virtual int write_row(byte * buf)=0;
virtual int update_row(const byte * old_data, byte * new_data)=0;
virtual int delete_row(const byte * buf)=0;
virtual int write_row(byte * buf) { return HA_ERR_WRONG_COMMAND; }
virtual int update_row(const byte * old_data, byte * new_data)
{ return HA_ERR_WRONG_COMMAND; }
virtual int delete_row(const byte * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)=0;
uint key_len, enum ha_rkey_function find_flag)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag)=0;
virtual int index_next(byte * buf)=0;
virtual int index_prev(byte * buf)=0;
virtual int index_first(byte * buf)=0;
virtual int index_last(byte * buf)=0;
uint key_len, enum ha_rkey_function find_flag);
virtual int index_next(byte * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_prev(byte * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_first(byte * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_last(byte * buf)
{ return HA_ERR_WRONG_COMMAND; }
virtual int index_next_same(byte *buf, const byte *key, uint keylen);
virtual int index_read_last(byte * buf, const byte * key, uint key_len)
{
return (my_errno=HA_ERR_WRONG_COMMAND);
}
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
virtual int read_range_first(const key_range *start_key,
const key_range *end_key,
bool eq_range, bool sorted);
virtual int read_range_next();
int compare_key(key_range *range);
virtual int ft_init()
{ return -1; }
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key,
uint keylen)
{ return NULL; }
virtual int ft_read(byte *buf) { return -1; }
virtual int rnd_init(bool scan=1)=0;
virtual int rnd_end() { return 0; }
virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; }
virtual int rnd_next(byte *buf)=0;
virtual int rnd_pos(byte * buf, byte *pos)=0;
virtual int read_first_row(byte *buf, uint primary_key);
virtual int restart_rnd_next(byte *buf, byte *pos);
/*
The following function is only needed for tables that may be temporary
tables during joins
*/
virtual int restart_rnd_next(byte *buf, byte *pos)
{ return HA_ERR_WRONG_COMMAND; }
virtual int rnd_same(byte *buf, uint inx)
{ return HA_ERR_WRONG_COMMAND; }
virtual ha_rows records_in_range(uint inx, key_range *min_key,
key_range *max_key)
{ return (ha_rows) 10; }
virtual void position(const byte *record)=0;
virtual my_off_t row_position() { return HA_OFFSET_ERROR; }
virtual void info(uint)=0;
virtual int extra(enum ha_extra_function operation)=0;
virtual int extra(enum ha_extra_function operation)
{ return 0; }
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
{
return extra(operation);
}
{ return extra(operation); }
virtual int reset() { return extra(HA_EXTRA_RESET); }
virtual int external_lock(THD *thd, int lock_type)=0;
virtual void unlock_row() {}
virtual int start_stmt(THD *thd) {return 0;}
virtual int delete_all_rows();
/*
This is called to delete all rows in a table
If the handler doesn't support this, then this function will
return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
by one.
*/
virtual int delete_all_rows()
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
virtual longlong get_auto_increment();
virtual void update_create_info(HA_CREATE_INFO *create_info) {}
virtual int check(THD* thd, HA_CHECK_OPT* check_opt );
virtual int repair(THD* thd, HA_CHECK_OPT* check_opt);
virtual bool check_and_repair(THD *thd) {return 1;}
virtual int optimize(THD* thd,HA_CHECK_OPT* check_opt);
virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt);
virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt);
virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
virtual int backup(THD* thd, HA_CHECK_OPT* check_opt);
/* admin commands - called from mysql_admin_table */
virtual int check(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int backup(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
/*
restore assumes .frm file must exist, and that generate_table() has been
called; It will just copy the data file and run repair.
*/
virtual int restore(THD* thd, HA_CHECK_OPT* check_opt);
virtual int dump(THD* thd, int fd = -1) { return ER_DUMP_NOT_IMPLEMENTED; }
virtual int restore(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int repair(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int optimize(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt)
{ return HA_ADMIN_NOT_IMPLEMENTED; }
/* end of the list of admin commands */
virtual bool check_and_repair(THD *thd) { return HA_ERR_WRONG_COMMAND; }
virtual int dump(THD* thd, int fd = -1) { return HA_ERR_WRONG_COMMAND; }
virtual int disable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int enable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int indexes_are_disabled(void) {return 0;}
virtual void start_bulk_insert(ha_rows rows) {}
virtual int end_bulk_insert() {return 0; }
virtual int discard_or_import_tablespace(my_bool discard) {return -1;}
// not implemented by default
virtual int net_read_dump(NET* net)
{ return ER_DUMP_NOT_IMPLEMENTED; }
virtual int discard_or_import_tablespace(my_bool discard)
{return HA_ERR_WRONG_COMMAND;}
virtual int net_read_dump(NET* net) { return HA_ERR_WRONG_COMMAND; }
virtual char *update_table_comment(const char * comment)
{ return (char*) comment;}
virtual void append_create_info(String *packet) {}
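Throughout the hunk above, methods that used to be pure virtual now default to returning a "wrong command" or "not implemented" code, so an engine overrides only the operations it actually supports and every caller gets a uniform error for the rest. A compact sketch of that convention; the class names and the error value are invented:

#include <cstdio>

static const int ERR_WRONG_COMMAND = 131;   // illustrative error code

struct BaseEngine {
  virtual ~BaseEngine() {}
  // Optional operations default to an error instead of being pure virtual.
  virtual int delete_all_rows() { return ERR_WRONG_COMMAND; }
  virtual int index_prev()      { return ERR_WRONG_COMMAND; }
};

struct TinyEngine : BaseEngine {
  // This engine only knows how to truncate; index_prev stays unsupported.
  int delete_all_rows() override { std::puts("truncated"); return 0; }
};

int main() {
  TinyEngine e;
  std::printf("delete_all_rows -> %d\n", e.delete_all_rows());
  std::printf("index_prev      -> %d (unsupported)\n", e.index_prev());
  return 0;
}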
@ -396,38 +449,47 @@ public:
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
virtual ulong table_flags(void) const =0;
virtual ulong index_flags(uint idx) const
{
return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEY_READ_ONLY);
}
virtual ulong index_flags(uint idx, uint part=~0) const =0;
virtual ulong index_ddl_flags(KEY *wanted_index) const
{
return (HA_DDL_SUPPORT);
}
{ return (HA_DDL_SUPPORT); }
virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys)
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0), "online add index");
return (HA_DDL_NOT_IMPLEMENTED);
}
{ return (HA_ERR_WRONG_COMMAND); }
virtual int drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys)
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0), "online drop index");
return (HA_DDL_NOT_IMPLEMENTED);
}
virtual uint max_record_length() const =0;
virtual uint max_keys() const =0;
virtual uint max_key_parts() const =0;
virtual uint max_key_length()const =0;
virtual uint max_key_part_length() { return 255; }
{ return (HA_ERR_WRONG_COMMAND); }
uint max_record_length() const
{ return min(HA_MAX_REC_LENGTH, max_supported_record_length()); }
uint max_keys() const
{ return min(MAX_KEY, max_supported_keys()); }
uint max_key_parts() const
{ return min(MAX_REF_PARTS, max_supported_key_parts()); }
uint max_key_length() const
{ return min(MAX_KEY_LENGTH, max_supported_key_length()); }
uint max_key_part_length()
{ return min(MAX_KEY_LENGTH, max_supported_key_part_length()); }
virtual uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
virtual uint max_supported_keys() const { return 0; }
virtual uint max_supported_key_parts() const { return MAX_REF_PARTS; }
virtual uint max_supported_key_length() const { return MAX_KEY_LENGTH; }
virtual uint max_supported_key_part_length() { return 255; }
virtual uint min_record_length(uint options) const { return 1; }
virtual bool low_byte_first() const { return 1; }
virtual uint checksum() const { return 0; }
virtual bool is_crashed() const { return 0; }
virtual bool auto_repair() const { return 0; }
/*
default rename_table() and delete_table() rename/delete files with a
given name and extensions from bas_ext()
*/
virtual int rename_table(const char *from, const char *to);
virtual int delete_table(const char *name);
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
/* lock_count() can be more than one if the table is a MERGE */
virtual uint lock_count(void) const { return 1; }
virtual THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
@ -439,8 +501,6 @@ public:
Is query with this table cachable (have sense only for ASKTRANSACT
tables)
*/
static bool caching_allowed(THD* thd, char* table_key,
uint key_length, uint8 cahe_type);
};
/* Some extern variables used with handlers */
@ -457,6 +517,8 @@ extern TYPELIB tx_isolation_typelib;
#define ha_supports_generate(T) (T != DB_TYPE_INNODB)
bool ha_caching_allowed(THD* thd, char* table_key,
uint key_length, uint8 cache_type);
enum db_type ha_resolve_by_name(const char *name, uint namelen);
const char *ha_get_storage_engine(enum db_type db_type);
handler *get_new_handler(TABLE *table, enum db_type db_type);

View file

@ -1188,7 +1188,7 @@ int subselect_single_select_engine::exec()
join->thd->where= save_where;
executed= 1;
join->thd->lex->current_select= save_select;
DBUG_RETURN(join->error?join->error:1);
DBUG_RETURN(join->error ? join->error : 1);
}
if (item->engine_changed)
{
@ -1240,6 +1240,8 @@ int subselect_uniquesubquery_engine::exec()
}
else
{
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);
@ -1261,7 +1263,7 @@ int subselect_uniquesubquery_engine::exec()
subselect_uniquesubquery_engine::~subselect_uniquesubquery_engine()
{
/* Tell handler we don't need the index anymore */
tab->table->file->index_end();
tab->table->file->ha_index_end();
}
@ -1288,6 +1290,8 @@ int subselect_indexsubquery_engine::exec()
}
else
{
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
error= table->file->index_read(table->record[0],
tab->ref.key_buff,
tab->ref.key_length,HA_READ_KEY_EXACT);

View file

@ -211,7 +211,7 @@ static SYMBOL symbols[] = {
{ "IGNORE", SYM(IGNORE_SYM)},
{ "IMPORT", SYM(IMPORT)},
{ "IN", SYM(IN_SYM)},
{ "INDEX", SYM(INDEX)},
{ "INDEX", SYM(INDEX_SYM)},
{ "INDEXES", SYM(INDEXES)},
{ "INFILE", SYM(INFILE)},
{ "INNER", SYM(INNER_SYM)},

View file

@ -25,6 +25,7 @@
#include "mysql_priv.h"
#include "sql_acl.h"
#include "sql_repl.h"
#include "ha_innodb.h" // necessary to cut the binlog when crash recovery
#include <my_dir.h>
#include <stdarg.h>
@ -296,6 +297,7 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
if ((index_file_nr= my_open(index_file_name,
O_RDWR | O_CREAT | O_BINARY ,
MYF(MY_WME))) < 0 ||
my_sync(index_file_nr, MYF(MY_WME)) ||
init_io_cache(&index_file, index_file_nr,
IO_SIZE, WRITE_CACHE,
my_seek(index_file_nr,0L,MY_SEEK_END,MYF(0)),
@ -315,16 +317,21 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
s.set_log_pos(this);
s.write(&log_file);
}
if (flush_io_cache(&log_file))
if (flush_io_cache(&log_file) ||
my_sync(log_file.file, MYF(MY_WME)))
goto err;
if (write_file_name_to_index_file)
{
/* As this is a new log file, we write the file name to the index file */
/*
As this is a new log file, we write the file name to the index
file. As with every write to the index file, we sync it.
*/
if (my_b_write(&index_file, (byte*) log_file_name,
strlen(log_file_name)) ||
my_b_write(&index_file, (byte*) "\n", 1) ||
flush_io_cache(&index_file))
flush_io_cache(&index_file) ||
my_sync(index_file.file, MYF(MY_WME)))
goto err;
}
break;
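The my_sync() calls added above make sure the binlog index file and the new log's header reach disk before the server relies on them. A minimal POSIX sketch of the same append-then-sync pattern; the file name and helper are made up for the example:

#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <unistd.h>

// Append one line to an index-like file and force it to disk before
// returning success; any failure is reported to the caller.
static int append_line_durably(const char* path, const char* line) {
  int fd = open(path, O_WRONLY | O_CREAT | O_APPEND, 0644);
  if (fd < 0)
    return -1;
  int rc = 0;
  if (write(fd, line, strlen(line)) < 0 ||
      write(fd, "\n", 1) < 0 ||
      fsync(fd) != 0)                 // the step the patch adds: sync every write
    rc = -1;
  if (close(fd) != 0)
    rc = -1;
  return rc;
}

int main() {
  if (append_line_durably("demo-bin.index", "demo-bin.000001") != 0)
    std::perror("append_line_durably");
  return 0;
}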
@ -405,7 +412,8 @@ static bool copy_up_file_and_fill(IO_CACHE *index_file, my_off_t offset)
goto err;
}
/* The following will either truncate the file or fill the end with \n' */
if (my_chsize(file, offset - init_offset, '\n', MYF(MY_WME)))
if (my_chsize(file, offset - init_offset, '\n', MYF(MY_WME)) ||
my_sync(file, MYF(MY_WME)))
goto err;
/* Reset data in old index cache */
@ -995,6 +1003,8 @@ void MYSQL_LOG::new_file(bool need_lock)
open(old_name, save_log_type, new_name_ptr, index_file_name, io_cache_type,
no_auto_events, max_size);
if (this == &mysql_bin_log)
report_pos_in_innodb();
my_free(old_name,MYF(0));
end:
@ -1406,6 +1416,30 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
if (event_info->get_type_code() == QUERY_EVENT ||
event_info->get_type_code() == EXEC_LOAD_EVENT)
{
#ifndef DBUG_OFF
if (unlikely(opt_crash_binlog_innodb))
{
/*
This option is for use in rpl_crash_binlog_innodb.test.
1st we want to verify that Binlog_dump thread cannot send the
event now (because of LOCK_log): we here tell the Binlog_dump
thread to wake up, sleep for the slave to have time to possibly
receive data from the master (it should not), and then crash.
2nd we want to verify that at crash recovery the rolled back
event is cut from the binlog.
*/
if (!(--opt_crash_binlog_innodb))
{
signal_update();
sleep(2);
fprintf(stderr,"This is a normal crash because of"
" --crash-binlog-innodb\n");
assert(0);
}
DBUG_PRINT("info",("opt_crash_binlog_innodb: %d",
opt_crash_binlog_innodb));
}
#endif
error = ha_report_binlog_offset_and_commit(thd, log_file_name,
file->pos_in_file);
called_handler_commit=1;
@ -1561,6 +1595,22 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback)
write_error=1; // Don't give more errors
goto err;
}
#ifndef DBUG_OFF
if (unlikely(opt_crash_binlog_innodb))
{
/* see the previous MYSQL_LOG::write() method for a comment */
if (!(--opt_crash_binlog_innodb))
{
signal_update();
sleep(2);
fprintf(stderr, "This is a normal crash because of"
" --crash-binlog-innodb\n");
assert(0);
}
DBUG_PRINT("info",("opt_crash_binlog_innodb: %d",
opt_crash_binlog_innodb));
}
#endif
if ((ha_report_binlog_offset_and_commit(thd, log_file_name,
log_file.pos_in_file)))
goto err;
@ -1978,4 +2028,131 @@ bool flush_error_log()
}
/*
If the server has InnoDB on, and InnoDB has published the position of the
last committed transaction (which happens only if a crash recovery occurred at
this startup), then truncate the previous binary log at the position given by
InnoDB. If the binlog is shorter than the position, print a message to the error
log.
SYNOPSIS
cut_spurious_tail()
RETURN VALUES
1 Error
0 Ok
*/
bool MYSQL_LOG::cut_spurious_tail()
{
int error= 0;
char llbuf1[22], llbuf2[22];
ulonglong actual_size;
DBUG_ENTER("cut_spurious_tail");
#ifdef HAVE_INNOBASE_DB
if (have_innodb != SHOW_OPTION_YES)
DBUG_RETURN(0);
/*
This is the place where we use information from InnoDB to cut the
binlog.
*/
char *name= ha_innobase::get_mysql_bin_log_name();
ulonglong pos= ha_innobase::get_mysql_bin_log_pos();
if (name[0] == 0 || pos == ULONGLONG_MAX)
{
DBUG_PRINT("info", ("InnoDB has not set binlog info"));
DBUG_RETURN(0);
}
/* The binlog given by InnoDB normally is never an active binlog */
if (is_open() && is_active(name))
{
sql_print_error("Warning: after InnoDB crash recovery, InnoDB says that "
"the binary log of the previous run has the same name "
"'%s' as the current one; this is likely to be abnormal.",
name);
DBUG_RETURN(1);
}
sql_print_error("After InnoDB crash recovery, trying to truncate "
"the binary log '%s' at position %s corresponding to the "
"last committed transaction...", name, llstr(pos, llbuf1));
/* If we have a too long binlog, cut. If too short, print error */
int fd= my_open(name, O_EXCL | O_APPEND | O_BINARY | O_WRONLY, MYF(MY_WME));
if (fd < 0)
{
int save_errno= my_errno;
sql_print_error("Could not open the binary log '%s' for truncation.",
name);
if (save_errno != ENOENT)
sql_print_error("The binary log '%s' should not be used for "
"replication.", name);
DBUG_RETURN(1);
}
if (pos > (actual_size= my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME))))
{
sql_print_error("The binary log '%s' is shorter than its expected size "
"(actual: %s, expected: %s) so it misses at least one "
"committed transaction; so it should not be used for "
"replication.", name, llstr(actual_size, llbuf1),
llstr(pos, llbuf2));
error= 1;
goto err;
}
if (pos < actual_size)
{
sql_print_error("The binary log '%s' is bigger than its expected size "
"(actual: %s, expected: %s) so it contains a rolled back "
"transaction; now truncating that.", name,
llstr(actual_size, llbuf1), llstr(pos, llbuf2));
/*
On some OSes, my_chsize() can only pad with 0s instead of really
truncating. Then mysqlbinlog (and the Binlog_dump thread) will error on
these zeroes. This is annoying, but not more (you just need to manually
switch replication to the next binlog). Fortunately, in my_chsize.c, it
says that all modern machines support real ftruncate().
*/
if ((error= my_chsize(fd, pos, 0, MYF(MY_WME))))
goto err;
}
err:
if (my_close(fd, MYF(MY_WME)))
error= 1;
#endif
DBUG_RETURN(error);
}
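cut_spurious_tail() above boils down to comparing the position InnoDB expects with the binlog's actual size: a longer file means a rolled-back tail to chop off, a shorter one means a missing committed transaction, so only a warning is possible. A standalone sketch of that comparison using plain POSIX calls; the log name and return convention are illustrative:

#include <cstdio>
#include <sys/stat.h>
#include <unistd.h>

// Compare the expected end-of-log position with the file's real size and
// truncate or warn accordingly. Returns 0 on success, 1 on error.
static int cut_spurious_tail(const char* log, unsigned long long expected_pos) {
  struct stat st;
  if (stat(log, &st) != 0) {
    std::perror("stat");
    return 1;
  }
  unsigned long long actual = (unsigned long long) st.st_size;
  if (expected_pos > actual) {
    // The log misses at least one committed transaction; nothing to cut.
    std::fprintf(stderr, "%s is shorter than expected (%llu < %llu)\n",
                 log, actual, expected_pos);
    return 1;
  }
  if (expected_pos < actual) {
    // Rolled-back events after the last commit: truncate them away.
    std::fprintf(stderr, "truncating %s from %llu to %llu bytes\n",
                 log, actual, expected_pos);
    if (truncate(log, (off_t) expected_pos) != 0) {
      std::perror("truncate");
      return 1;
    }
  }
  return 0;   // sizes already match, or truncation succeeded
}

int main() {
  return cut_spurious_tail("demo-bin.000001", 4ULL);
}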
/*
If the server has InnoDB on, store the binlog name and position into
InnoDB. This function is used every time we create a new binlog.
SYNOPSIS
report_pos_in_innodb()
NOTES
This cannot simply be done in MYSQL_LOG::open(), because when we create
the first binlog at startup, we have not called ha_init() yet so we cannot
write into InnoDB yet.
RETURN VALUES
1 Error
0 Ok
*/
void MYSQL_LOG::report_pos_in_innodb()
{
DBUG_ENTER("report_pos_in_innodb");
#ifdef HAVE_INNOBASE_DB
if (is_open() && have_innodb == SHOW_OPTION_YES)
{
DBUG_PRINT("info", ("Reporting binlog info into InnoDB - "
"name: '%s' position: %d",
log_file_name, my_b_tell(&log_file)));
innobase_store_binlog_offset_and_flush_log(log_file_name,
my_b_tell(&log_file));
}
#endif
DBUG_VOID_RETURN;
}

View file

@ -886,6 +886,7 @@ extern my_bool opt_slave_compressed_protocol, use_temp_pool;
extern my_bool opt_readonly, lower_case_file_system;
extern my_bool opt_enable_named_pipe, opt_sync_frm;
extern my_bool opt_secure_auth;
extern uint opt_crash_binlog_innodb;
extern char *shared_memory_base_name, *mysqld_unix_port;
extern bool opt_enable_shared_memory;
extern char *default_tz_name;

View file

@ -269,11 +269,13 @@ my_bool opt_secure_auth= 0;
my_bool opt_short_log_format= 0;
my_bool opt_log_queries_not_using_indexes= 0;
my_bool lower_case_file_system= 0;
my_bool opt_innodb_safe_binlog= 0;
volatile bool mqh_used = 0;
uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options;
uint delay_key_write_options, protocol_version;
uint lower_case_table_names;
uint opt_crash_binlog_innodb;
uint volatile thread_count, thread_running, kill_cached_threads, wake_thread;
ulong back_log, connect_timeout, concurrency;
@ -2534,6 +2536,36 @@ server.");
}
}
if (opt_innodb_safe_binlog)
{
if (innobase_flush_log_at_trx_commit != 1)
{
sql_print_error("Warning: --innodb-safe-binlog is meaningful only if "
"innodb_flush_log_at_trx_commit is 1; now setting it "
"to 1.");
innobase_flush_log_at_trx_commit= 1;
}
if (innobase_unix_file_flush_method)
{
/*
This option has so many values that it's hard to know which value is
good (especially "littlesync", and on Windows... see
srv/srv0start.c).
*/
sql_print_error("Warning: --innodb-safe-binlog requires that "
"the innodb_flush_method actually synchronizes the "
"InnoDB log to disk; it is your responsibility "
"to verify that the method you chose does it.");
}
if (sync_binlog_period != 1)
{
sql_print_error("Warning: --innodb-safe-binlog is meaningful only if "
"the global sync_binlog variable is 1; now setting it "
"to 1.");
sync_binlog_period= 1;
}
}
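The startup block above is dependent-option validation: --innodb-safe-binlog only makes sense with flush-at-commit and sync_binlog forced to 1, so the server warns and overrides both. A small self-contained sketch of the same pattern; the option names are shortened and invented:

#include <cstdio>

struct Options {
  bool     safe_binlog     = true;   // the "master switch"
  long     flush_at_commit = 2;      // must be 1 when safe_binlog is on
  unsigned sync_binlog     = 0;      // must be 1 when safe_binlog is on
};

// Force the settings the master switch depends on, warning about each fix.
static void reconcile(Options& o) {
  if (!o.safe_binlog)
    return;
  if (o.flush_at_commit != 1) {
    std::fprintf(stderr, "warning: forcing flush_at_commit=1\n");
    o.flush_at_commit = 1;
  }
  if (o.sync_binlog != 1) {
    std::fprintf(stderr, "warning: forcing sync_binlog=1\n");
    o.sync_binlog = 1;
  }
}

int main() {
  Options o;
  reconcile(o);
  std::printf("flush_at_commit=%ld sync_binlog=%u\n",
              o.flush_at_commit, o.sync_binlog);
  return 0;
}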
if (ha_init())
{
sql_print_error("Can't init databases");
@ -2542,6 +2574,18 @@ server.");
if (opt_myisam_log)
(void) mi_log(1);
/*
Now that InnoDB is initialized, we can know the last good binlog position
and cut the binlog if needed. This function does nothing if there was no
crash recovery by InnoDB.
*/
if (opt_innodb_safe_binlog)
{
/* not fatal if fails (but print errors) */
mysql_bin_log.cut_spurious_tail();
}
mysql_bin_log.report_pos_in_innodb();
/* call ha_init_key_cache() on all key caches to init them */
process_key_caches(&ha_init_key_cache);
/* We must set dflt_key_cache in case we are using ISAM tables */
@ -3816,8 +3860,8 @@ enum options_mysqld
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
OPT_INNODB_FLUSH_METHOD,
OPT_INNODB_FAST_SHUTDOWN,
OPT_INNODB_FILE_PER_TABLE,
OPT_SAFE_SHOW_DB,
OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB,
OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG,
OPT_INNODB, OPT_ISAM, OPT_NDBCLUSTER, OPT_SKIP_SAFEMALLOC,
OPT_TEMP_POOL, OPT_TX_ISOLATION,
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@ -4498,6 +4542,12 @@ replicating a LOAD DATA INFILE command.",
"The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.",
(gptr*) &connect_timeout, (gptr*) &connect_timeout,
0, GET_ULONG, REQUIRED_ARG, CONNECT_TIMEOUT, 2, LONG_TIMEOUT, 0, 1, 0 },
#ifdef HAVE_REPLICATION
{"crash_binlog_innodb", OPT_CRASH_BINLOG_INNODB,
"Used only for testing, to crash when writing Nth event to binlog.",
(gptr*) &opt_crash_binlog_innodb, (gptr*) &opt_crash_binlog_innodb,
0, GET_UINT, REQUIRED_ARG, 0, 0, ~(uint)0, 0, 1, 0},
#endif
{"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT,
"How long a INSERT DELAYED thread should wait for INSERT statements before terminating.",
(gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0,
@ -4577,6 +4627,27 @@ replicating a LOAD DATA INFILE command.",
"Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.",
(gptr*) &innobase_lock_wait_timeout, (gptr*) &innobase_lock_wait_timeout,
0, GET_LONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
#ifdef HAVE_REPLICATION
/*
Disabled for the 4.1.3 release. Disabling just this paragraph of code is
enough, as the user then can't set it to 1, so it will always be ignored in the
rest of the code.
*/
#if MYSQL_VERSION_ID > 40103
/*
innodb_safe_binlog is not a variable, just an option. Does not make
sense to make it a variable, as it is only used at startup (and so the
value would be lost at next startup, so setting it on the fly would have no
effect).
*/
{"innodb_safe_binlog", OPT_INNODB_SAFE_BINLOG,
"After a crash recovery by InnoDB, truncate the binary log to the last \
InnoDB committed transaction. Use only if this server updates ONLY InnoDB \
tables.",
(gptr*) &opt_innodb_safe_binlog, (gptr*) &opt_innodb_safe_binlog,
0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
#endif
#endif
{"innodb_thread_concurrency", OPT_INNODB_THREAD_CONCURRENCY,
"Helps in performance tuning in heavily concurrent environments.",
(gptr*) &innobase_thread_concurrency, (gptr*) &innobase_thread_concurrency,

View file

@ -413,7 +413,7 @@ QUICK_SELECT::~QUICK_SELECT()
{
if (!dont_free)
{
file->index_end();
file->ha_index_end();
free_root(&alloc,MYF(0));
}
}
@ -609,7 +609,6 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
table_map prev_tables,
ha_rows limit, bool force_quick_range)
{
uint basflag;
uint idx;
double scan_time;
DBUG_ENTER("test_quick_select");
@ -623,9 +622,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if (!cond || (specialflag & SPECIAL_SAFE_MODE) && ! force_quick_range ||
!limit)
DBUG_RETURN(0); /* purecov: inspected */
if (!((basflag= head->file->table_flags()) & HA_KEYPOS_TO_RNDPOS) &&
keys_to_use.is_set_all() || keys_to_use.is_clear_all())
DBUG_RETURN(0); /* Not smart database */
if (keys_to_use.is_clear_all())
DBUG_RETURN(0);
records=head->file->records;
if (!records)
records++; /* purecov: inspected */
@ -651,7 +649,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
/* set up parameter that is passed to all functions */
param.thd= thd;
param.baseflag=basflag;
param.baseflag=head->file->table_flags();
param.prev_tables=prev_tables | const_tables;
param.read_tables=read_tables;
param.current_table= head->map;
@ -728,7 +726,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
found_records=check_quick_select(&param, idx, *key);
if (found_records != HA_POS_ERROR && found_records > 2 &&
head->used_keys.is_set(keynr) &&
(head->file->index_flags(keynr) & HA_KEY_READ_ONLY))
(head->file->index_flags(keynr) & HA_KEYREAD_ONLY))
{
/*
We can resolve this by only reading through this key.
@ -2368,7 +2366,7 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree)
0);
else
quick=new QUICK_SELECT(param->thd, param->table, param->real_keynr[idx]);
if (quick)
{
if (quick->error ||
@ -2542,7 +2540,6 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length)
QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref)
{
table->file->index_end(); // Remove old cursor
QUICK_SELECT *quick=new QUICK_SELECT(thd, table, ref->key, 1);
KEY *key_info = &table->key_info[ref->key];
KEY_PART *key_part;
@ -2703,20 +2700,12 @@ int QUICK_SELECT_GEOM::get_next()
QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts)
: QUICK_SELECT(*q), rev_it(rev_ranges)
{
bool not_read_after_key = file->table_flags() & HA_NOT_READ_AFTER_KEY;
QUICK_RANGE *r;
it.rewind();
for (r = it++; r; r = it++)
{
rev_ranges.push_front(r);
if (not_read_after_key && range_reads_after_key(r))
{
it.rewind(); // Reset range
error = HA_ERR_UNSUPPORTED;
dont_free=1; // Don't free memory from 'q'
return;
}
}
/* Remove EQ_RANGE flag for keys that are not using the full key */
for (r = rev_it++; r; r = rev_it++)
@ -2786,29 +2775,10 @@ int QUICK_SELECT_DESC::get_next()
else
{
DBUG_ASSERT(range->flag & NEAR_MAX || range_reads_after_key(range));
#ifndef NOT_IMPLEMENTED_YET
result=file->index_read(record, (byte*) range->max_key,
range->max_length,
((range->flag & NEAR_MAX) ?
HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV));
#else
/*
Heikki changed Sept 11, 2002: since InnoDB does not store the cursor
position if READ_KEY_EXACT is used to a primary key with all
key columns specified, we must use below HA_READ_KEY_OR_NEXT,
so that InnoDB stores the cursor position and is able to move
the cursor one step backward after the search.
*/
/*
Note: even if max_key is only a prefix, HA_READ_AFTER_KEY will
do the right thing - go past all keys which match the prefix
*/
result=file->index_read(record, (byte*) range->max_key,
range->max_length,
((range->flag & NEAR_MAX) ?
HA_READ_KEY_OR_NEXT : HA_READ_AFTER_KEY));
result = file->index_prev(record);
#endif
}
if (result)
{

View file

@ -90,7 +90,7 @@ public:
int init()
{
key_part_info= head->key_info[index].key_part;
return error=file->index_init(index);
return error=file->ha_index_init(index);
}
virtual int get_next();
virtual bool reverse_sorted() { return 0; }

View file

@ -46,9 +46,9 @@
#include "mysql_priv.h"
#include "sql_select.h"
static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref,
Field* field, COND *cond,
uint *range_fl, uint *key_prefix_length);
static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, Field* field,
COND *cond, uint *range_fl,
uint *key_prefix_length);
static int reckey_in_range(bool max_fl, TABLE_REF *ref, Field* field,
COND *cond, uint range_fl, uint prefix_len);
static int maxmin_in_range(bool max_fl, Field* field, COND *cond);
@ -166,11 +166,6 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
Item_field *item_field= ((Item_field*) expr);
TABLE *table= item_field->field->table;
if ((table->file->table_flags() & HA_NOT_READ_AFTER_KEY))
{
const_result=0;
break;
}
/*
Look for a partial key that can be used for optimization.
If we succeed, ref.key_length will contain the length of
@ -186,7 +181,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
error= table->file->index_init((uint) ref.key);
error= table->file->ha_index_init((uint) ref.key);
if (!ref.key_length)
error= table->file->index_first(table->record[0]);
@ -206,7 +201,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
table->key_read= 0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
table->file->index_end();
table->file->ha_index_end();
if (error)
{
if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE)
@ -260,12 +255,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
if ((table->file->table_flags() & HA_NOT_READ_AFTER_KEY))
{
const_result= 0;
break;
}
error= table->file->index_init((uint) ref.key);
error= table->file->ha_index_init((uint) ref.key);
if (!ref.key_length)
error= table->file->index_last(table->record[0]);
@ -285,7 +275,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
table->key_read=0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
table->file->index_end();
table->file->ha_index_end();
if (error)
{
if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE)
@ -648,7 +638,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref,
keyinfo != keyinfo_end;
keyinfo++,idx++)
{
if (table->file->index_flags(idx) & HA_WRONG_ASCII_ORDER)
if (!(table->file->index_flags(idx) & HA_READ_ORDER))
break;
KEY_PART_INFO *part,*part_end;

View file

@ -70,7 +70,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
info->io_cache=tempfile;
reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0);
info->ref_pos=table->file->ref;
table->file->rnd_init(0);
if (!table->file->inited)
table->file->ha_rnd_init(0);
/*
table->sort.addon_field is checked because if we use addon fields,
@ -105,7 +106,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
else if (table->sort.record_pointers)
{
DBUG_PRINT("info",("using record_pointers"));
table->file->rnd_init(0);
table->file->ha_rnd_init(0);
info->cache_pos=table->sort.record_pointers;
info->cache_end=info->cache_pos+
table->sort.found_records*info->ref_length;
@ -116,7 +117,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
{
DBUG_PRINT("info",("using rr_sequential"));
info->read_record=rr_sequential;
table->file->rnd_init();
table->file->ha_rnd_init();
/* We can use record cache if we don't update dynamic length tables */
if (!table->no_cache &&
(use_record_cache > 0 ||
@ -142,7 +143,8 @@ void end_read_record(READ_RECORD *info)
{
filesort_free_buffers(info->table);
(void) info->file->extra(HA_EXTRA_NO_CACHE);
(void) info->file->rnd_end();
if (info->read_record != rr_quick) // otherwise quick_range does it
(void) info->file->ha_index_or_rnd_end();
info->table=0;
}
}
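init_read_record() above picks a read path once (sorted row pointers, a sort file, or a plain sequential scan) and then lets callers loop on a single read_record function pointer, with the cursor opened only if it is not already open and closed symmetrically in end_read_record(). A toy dispatcher in the same spirit; the ReadState struct and function names are invented:

#include <cstdio>
#include <vector>

struct ReadState {
  const std::vector<int>* sorted_positions;  // set when a sort left row pointers
  const std::vector<int>* table;             // always set
  size_t pos = 0;
  int (*read)(ReadState&, int&);             // read function chosen at init time
};

// Path 1: follow previously sorted row pointers.
static int read_by_pointers(ReadState& s, int& out) {
  if (s.pos >= s.sorted_positions->size()) return -1;
  out = (*s.table)[(*s.sorted_positions)[s.pos++]];
  return 0;
}

// Path 2: plain sequential scan in storage order.
static int read_sequential(ReadState& s, int& out) {
  if (s.pos >= s.table->size()) return -1;
  out = (*s.table)[s.pos++];
  return 0;
}

// Pick the applicable path once, then let callers loop on read().
static void init_read_record(ReadState& s, const std::vector<int>& table,
                             const std::vector<int>* sorted) {
  s.table = &table;
  s.sorted_positions = sorted;
  s.pos = 0;
  s.read = sorted ? read_by_pointers : read_sequential;
}

int main() {
  std::vector<int> rows  = {30, 10, 20};
  std::vector<int> order = {1, 2, 0};        // positions sorted by value
  ReadState s;
  init_read_record(s, rows, &order);
  for (int v; s.read(s, v) == 0; )
    std::printf("%d\n", v);
  return 0;
}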

View file

@ -1428,8 +1428,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1);
table->field[1]->store(combo.user.str,combo.user.length, &my_charset_latin1);
table->file->index_init(0);
if (table->file->index_read(table->record[0],
if (table->file->index_read_idx(table->record[0], 0,
(byte*) table->field[0]->ptr,0,
HA_READ_KEY_EXACT))
{
@ -1440,7 +1439,6 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
else
my_error(ER_NO_PERMISSION_TO_CREATE_USER, MYF(0),
thd->user, thd->host_or_ip);
error= -1;
goto end;
}
old_row_exists = 0;
@ -1577,7 +1575,6 @@ end:
&thd->lex->mqh,
rights);
}
table->file->index_end();
DBUG_RETURN(error);
}
@ -1613,8 +1610,7 @@ static int replace_db_table(TABLE *table, const char *db,
table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1);
table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1);
table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1);
table->file->index_init(0);
if (table->file->index_read(table->record[0],(byte*) table->field[0]->ptr,0,
if (table->file->index_read_idx(table->record[0],0,(byte*) table->field[0]->ptr,0,
HA_READ_KEY_EXACT))
{
if (what == 'N')
@ -1668,13 +1664,11 @@ static int replace_db_table(TABLE *table, const char *db,
acl_update_db(combo.user.str,combo.host.str,db,rights);
else
acl_insert_db(combo.user.str,combo.host.str,db,rights);
table->file->index_end();
DBUG_RETURN(0);
/* This could only happen if the grant tables got corrupted */
table_error:
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
table->file->index_end();
abort:
DBUG_RETURN(-1);
@ -1796,8 +1790,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
col_privs->field[3]->pack_length());
key_copy(key,col_privs,0,key_len);
col_privs->field[4]->store("",0, &my_charset_latin1);
col_privs->file->index_init(0);
if (col_privs->file->index_read(col_privs->record[0],
if (col_privs->file->index_read_idx(col_privs->record[0],0,
(byte*) col_privs->field[0]->ptr,
key_len, HA_READ_KEY_EXACT))
{
@ -1912,7 +1905,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
List_iterator <LEX_COLUMN> iter(columns);
class LEX_COLUMN *xx;
table->file->index_init(0);
table->file->ha_index_init(0);
while ((xx=iter++))
{
ulong privileges = xx->rights;
@ -1982,7 +1975,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
my_hash_insert(&g_t->hash_columns,(byte*) grant_column);
}
}
table->file->index_end();
/*
If revoke of privileges on the table level, remove all such privileges
@ -1991,7 +1983,6 @@ static int replace_column_table(GRANT_TABLE *g_t,
if (revoke_grant)
{
table->file->index_init(0);
if (table->file->index_read(table->record[0], (byte*) table->field[0]->ptr,
key_length, HA_READ_KEY_EXACT))
goto end;
@ -2047,7 +2038,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
}
end:
table->file->index_end();
table->file->ha_index_end();
DBUG_RETURN(result);
}
@ -2560,15 +2551,13 @@ my_bool grant_init(THD *org_thd)
goto end;
t_table = tables[0].table; c_table = tables[1].table;
t_table->file->index_init(0);
t_table->file->ha_index_init(0);
if (t_table->file->index_first(t_table->record[0]))
{
t_table->file->index_end();
return_val= 0;
goto end_unlock;
}
grant_option= TRUE;
t_table->file->index_end();
/* Will be restored by org_thd->store_globals() */
my_pthread_setspecific_ptr(THR_MALLOC,&memex);
@ -2588,7 +2577,7 @@ my_bool grant_init(THD *org_thd)
{
sql_print_error("Warning: 'tables_priv' entry '%s %s@%s' "
"ignored in --skip-name-resolve mode.",
mem_check->tname, mem_check->user,
mem_check->tname, mem_check->user,
mem_check->host, mem_check->host);
continue;
}
@ -2605,6 +2594,7 @@ my_bool grant_init(THD *org_thd)
return_val=0; // Return ok
end_unlock:
t_table->file->ha_index_end();
mysql_unlock_tables(thd, lock);
thd->version--; // Force close to free memory
@ -3548,12 +3538,10 @@ int mysql_drop_user(THD *thd, List <LEX_USER> &list)
record[0])))
{
tables[0].table->file->print_error(error, MYF(0));
tables[0].table->file->index_end();
DBUG_RETURN(-1);
}
delete_dynamic_element(&acl_users, acl_userd);
}
tables[0].table->file->index_end();
}
VOID(pthread_mutex_unlock(&acl_cache->lock));

View file

@ -1044,9 +1044,9 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
goto err_unlock; // Parse query
}
#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
if (check_tables && !handler::caching_allowed(thd, table->db(),
table->key_length(),
table->type()))
if (check_tables && !ha_caching_allowed(thd, table->db(),
table->key_length(),
table->type()))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
table_list.db, table_list.alias));
@ -2687,9 +2687,9 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
for (; tables_used; tables_used= tables_used->next)
{
TABLE *table= tables_used->table;
if (!handler::caching_allowed(thd, table->table_cache_key,
table->key_length,
table->file->table_cache_type()))
if (!ha_caching_allowed(thd, table->table_cache_key,
table->key_length,
table->file->table_cache_type()))
{
DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
tables_used->db, tables_used->alias));

View file

@ -169,6 +169,8 @@ public:
int purge_first_log(struct st_relay_log_info* rli, bool included);
bool reset_logs(THD* thd);
void close(uint exiting);
bool cut_spurious_tail();
void report_pos_in_innodb();
// iterating through the log index file
int find_log_pos(LOG_INFO* linfo, const char* log_name,

View file

@ -197,6 +197,7 @@ cleanup:
query_cache_invalidate3(thd, table_list, 1);
}
delete select;
transactional_table= table->file->has_transactions();
log_delayed= (transactional_table || table->tmp_table);
/*
@ -214,7 +215,7 @@ cleanup:
{
if (error <= 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
Query_log_event qinfo(thd, thd->query, thd->query_length,
log_delayed);
if (mysql_bin_log.write(&qinfo) && transactional_table)
error=1;
@ -233,7 +234,6 @@ cleanup:
mysql_unlock_tables(thd, thd->lock);
thd->lock=0;
}
delete select;
free_underlaid_joins(thd, &thd->lex->select_lex);
if (error >= 0 || thd->net.report_error)
send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN: 0);

View file

@ -72,6 +72,7 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables, bool dont_send_ok)
if (*ptr)
{
(*ptr)->file->ha_index_or_rnd_end();
VOID(pthread_mutex_lock(&LOCK_open));
if (close_thread_table(thd, ptr))
{
@ -94,10 +95,14 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables, bool dont_send_ok)
int mysql_ha_closeall(THD *thd, TABLE_LIST *tables)
{
TABLE **ptr=find_table_ptr_by_name(thd, tables->db, tables->real_name, 0);
if (*ptr && close_thread_table(thd, ptr))
if (*ptr)
{
/* Tell threads waiting for refresh that something has happened */
VOID(pthread_cond_broadcast(&COND_refresh));
(*ptr)->file->ha_index_or_rnd_end();
if (close_thread_table(thd, ptr))
{
/* Tell threads waiting for refresh that something has happened */
VOID(pthread_cond_broadcast(&COND_refresh));
}
}
return 0;
}
@ -136,7 +141,8 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables,
keyname,tables->alias);
return -1;
}
table->file->index_init(keyno);
table->file->ha_index_or_rnd_end();
table->file->ha_index_init(keyno);
}
List<Item> list;
@ -148,8 +154,8 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables,
uint num_rows;
byte *key;
uint key_len;
LINT_INIT(key);
LINT_INIT(key_len);
LINT_INIT(key);
LINT_INIT(key_len);
it++; // Skip first NULL field
@ -180,7 +186,8 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables,
err=table->file->index_first(table->record[0]);
else
{
if (!(err=table->file->rnd_init(1)))
table->file->ha_index_or_rnd_end();
if (!(err=table->file->ha_rnd_init(1)))
err=table->file->rnd_next(table->record[0]);
}
mode=RNEXT;

View file

@ -22,8 +22,6 @@ struct st_find_field
Field *field;
};
static void free_select(SQL_SELECT *sel);
/* Used fields */
static struct st_find_field init_used_fields[]=
@ -48,9 +46,9 @@ static struct st_find_field init_used_fields[]=
enum enum_used_fields
{
help_topic_help_topic_id= 0,
help_topic_name,
help_topic_name,
help_topic_help_category_id,
help_topic_description,
help_topic_description,
help_topic_example,
help_category_help_category_id,
@ -60,13 +58,13 @@ enum enum_used_fields
help_keyword_help_keyword_id,
help_keyword_name,
help_relation_help_topic_id,
help_relation_help_topic_id,
help_relation_help_keyword_id
};
/*
Fill st_find_field structure with pointers to fields
Fill st_find_field structure with pointers to fields
SYNOPSIS
init_fields()
@ -90,7 +88,7 @@ static bool init_fields(THD *thd, TABLE_LIST *tables,
/* We have to use 'new' here as field will be re_linked on free */
Item_field *field= new Item_field("mysql", find_fields->table_name,
find_fields->field_name);
if (!(find_fields->field= find_field_in_tables(thd, field, tables,
if (!(find_fields->field= find_field_in_tables(thd, field, tables,
&not_used, TRUE)))
DBUG_RETURN(1);
}
@ -119,12 +117,12 @@ static bool init_fields(THD *thd, TABLE_LIST *tables,
NOTE
Field 'names' is set only if more than one topic is found.
Fields 'name', 'description', 'example' are set only if
Fields 'name', 'description', 'example' are set only if
found exactly one topic.
*/
void memorize_variant_topic(THD *thd, TABLE *topics, int count,
struct st_find_field *find_fields,
struct st_find_field *find_fields,
List<String> *names,
String *name, String *description, String *example)
{
@ -136,7 +134,7 @@ void memorize_variant_topic(THD *thd, TABLE *topics, int count,
get_field(mem_root,find_fields[help_topic_description].field, description);
get_field(mem_root,find_fields[help_topic_example].field, example);
}
else
else
{
if (count == 1)
names->push_back(name);
@ -168,7 +166,7 @@ void memorize_variant_topic(THD *thd, TABLE *topics, int count,
NOTE
Field 'names' is set only if more than one topic was found.
Fields 'name', 'description', 'example' are set only if
Fields 'name', 'description', 'example' are set only if
exactly one topic was found.
*/
@ -179,12 +177,12 @@ int search_topics(THD *thd, TABLE *topics, struct st_find_field *find_fields,
{
DBUG_ENTER("search_topics");
int count= 0;
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, topics, select,1,0);
while (!read_record_info.read_record(&read_record_info))
{
if (!select->cond->val_int()) // Dosn't match like
if (!select->cond->val_int()) // Doesn't match like
continue;
memorize_variant_topic(thd,topics,count,find_fields,
names,name,description,example);
@ -219,7 +217,7 @@ int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
{
DBUG_ENTER("search_keyword");
int count= 0;
READ_RECORD read_record_info;
init_read_record(&read_record_info, thd, keywords, select,1,0);
while (!read_record_info.read_record(&read_record_info) && count<2)
@ -256,13 +254,13 @@ int search_keyword(THD *thd, TABLE *keywords, struct st_find_field *find_fields,
description description of found topic (out)
example example for found topic (out)
NOTE
NOTE
Field 'names' is set only if more than one topic was found.
Fields 'name', 'description', 'example' are set only if
Fields 'name', 'description', 'example' are set only if
exactly one topic was found.
*/
int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
struct st_find_field *find_fields, int16 key_id,
List<String> *names,
String *name, String *description, String *example)
@ -273,7 +271,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
Field *rtopic_id, *rkey_id;
DBUG_ENTER("get_topics_for_keyword");
if ((iindex_topic= find_type((char*) primary_key_name,
&topics->keynames, 1+2)-1)<0 ||
(iindex_relations= find_type((char*) primary_key_name,
@ -284,18 +282,18 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
}
rtopic_id= find_fields[help_relation_help_topic_id].field;
rkey_id= find_fields[help_relation_help_keyword_id].field;
topics->file->index_init(iindex_topic);
relations->file->index_init(iindex_relations);
topics->file->ha_index_init(iindex_topic);
relations->file->ha_index_init(iindex_relations);
rkey_id->store((longlong) key_id);
rkey_id->get_key_image(buff, rkey_id->pack_length(), rkey_id->charset(),
Field::itRAW);
int key_res= relations->file->index_read(relations->record[0],
(byte *)buff, rkey_id->pack_length(),
HA_READ_KEY_EXACT);
for ( ;
for ( ;
!key_res && key_id == (int16) rkey_id->val_int() ;
key_res= relations->file->index_next(relations->record[0]))
{
@ -305,7 +303,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
field->store((longlong) topic_id);
field->get_key_image(topic_id_buff, field->pack_length(), field->charset(),
Field::itRAW);
if (!topics->file->index_read(topics->record[0], (byte *)topic_id_buff,
field->pack_length(), HA_READ_KEY_EXACT))
{
@ -314,49 +312,11 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
count++;
}
}
topics->file->ha_index_end();
relations->file->ha_index_end();
DBUG_RETURN(count);
}
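get_topics_for_keyword() above is a hand-written join: probe the relations index by keyword id, and for each hit probe the topics index by topic id, now bracketed by ha_index_init()/ha_index_end() on both tables. A self-contained sketch of the same nested lookup, with std::multimap and std::map standing in for the two indexes and made-up sample data:

#include <cstdio>
#include <map>
#include <string>

int main() {
  // relations: keyword id -> topic id (the m:n link table's index).
  std::multimap<int, int> relations = {{7, 1}, {7, 3}, {9, 2}};
  // topics: topic id -> topic name (the topics table's primary key).
  std::map<int, std::string> topics = {{1, "JOIN"}, {2, "SELECT"}, {3, "UNION"}};

  int key_id = 7;
  int count = 0;
  // Outer probe: all relation rows for this keyword.
  for (auto it = relations.lower_bound(key_id);
       it != relations.end() && it->first == key_id; ++it) {
    // Inner probe: fetch the topic row by its id.
    auto t = topics.find(it->second);
    if (t != topics.end()) {
      std::printf("topic: %s\n", t->second.c_str());
      count++;
    }
  }
  std::printf("%d topic(s) for keyword %d\n", count, key_id);
  return 0;
}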
/*
Look for topics with keyword by mask
SYNOPSIS
search_topics_by_keyword()
thd Thread handler
keywords Table of keywords
topics Table of topics
relations Table of m:m relation "topic/keyword"
find_fields Filled array of info for fields
select Function to test for if matching help keyword.
Normally 'help_keyword.name like 'bit%'
RETURN VALUES
# number of topics found
names array of name of found topics (out)
name name of found topic (out)
description description of found topic (out)
example example for found topic (out)
NOTE
Field 'names' is set only if more than one topic was found.
Fields 'name', 'description', 'example' are set only if
exactly one topic was found.
*/
int search_topics_by_keyword(THD *thd,
TABLE *keywords, TABLE *topics, TABLE *relations,
struct st_find_field *find_fields,
SQL_SELECT *select, List<String> *names,
String *name, String *description, String *example)
{
int key_id;
return search_keyword(thd,keywords,find_fields,select,&key_id)!=1
? 0 : get_topics_for_keyword(thd,topics,relations,find_fields,key_id,
names,name,description,example);
}
/*
Look for categories by mask
@ -382,10 +342,10 @@ int search_categories(THD *thd, TABLE *categories,
Field *pfname= find_fields[help_category_name].field;
Field *pcat_id= find_fields[help_category_help_category_id].field;
int count= 0;
READ_RECORD read_record_info;
READ_RECORD read_record_info;
DBUG_ENTER("search_categories");
init_read_record(&read_record_info, thd, categories, select,1,0);
while (!read_record_info.read_record(&read_record_info))
{
@ -398,7 +358,7 @@ int search_categories(THD *thd, TABLE *categories,
names->push_back(lname);
}
end_read_record(&read_record_info);
DBUG_RETURN(count);
}
@ -423,7 +383,7 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
init_read_record(&read_record_info, thd, items, select,1,0);
while (!read_record_info.read_record(&read_record_info))
{
if (!select->cond->val_int())
if (!select->cond->val_int())
continue;
String *name= new (&thd->mem_root) String();
get_field(&thd->mem_root,pfname,name);
@ -436,7 +396,7 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
/*
Send the answer for a help request to the client
SYNOPSIS
send_answer_1()
protocol - protocol for sending
@ -466,10 +426,10 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3)
field_list.push_back(new Item_empty_string("name",64));
field_list.push_back(new Item_empty_string("description",1000));
field_list.push_back(new Item_empty_string("example",1000));
if (protocol->send_fields(&field_list,1))
DBUG_RETURN(1);
protocol->prepare_for_resend();
protocol->store(s1);
protocol->store(s2);
@ -539,7 +499,7 @@ extern "C" int string_ptr_cmp(const void* ptr1, const void* ptr2)
SYNOPSIS
send_variant_2_list()
protocol Protocol for sending
names List of names
names List of names
cat Value of the column <is_it_category>
source_name Name of the category for all items
@ -548,8 +508,8 @@ extern "C" int string_ptr_cmp(const void* ptr1, const void* ptr2)
0 Data was successfully sent
*/
int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol,
List<String> *names,
int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol,
List<String> *names,
const char *cat, String *source_name)
{
DBUG_ENTER("send_variant_2_list");
@ -589,17 +549,22 @@ int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol,
table goal table
error code of error (out)
RETURN VALUES
# created SQL_SELECT
# created SQL_SELECT
*/
SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables,
SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables,
TABLE *table, int *error)
{
cond->fix_fields(thd, tables, &cond); // can never fail
SQL_SELECT *res= make_select(table,0,0,cond,error);
return (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR))) ? 0 : res;
if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)))
{
delete res;
res=0;
}
return res;
}
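Aside, for illustration only (not part of the patch): the rewritten error path above deletes the SQL_SELECT as soon as check_quick() fails, so mysqld_help() below no longer has to keep several named selects around and free them at the end. A minimal standalone sketch of the same free-on-failure pattern, with made-up names:

#include <cstdio>

struct select_sketch                       // stand-in for SQL_SELECT
{
  bool check_ok() const { return true; }   // stand-in for a successful check_quick()
};

static select_sketch *make_select_or_null(bool error)
{
  select_sketch *res= new select_sketch();
  if (error || !res->check_ok())
  {
    delete res;                            // free on failure instead of leaking
    res= 0;
  }
  return res;
}

int main()
{
  select_sketch *sel= make_select_or_null(false);
  std::printf("select %s\n", sel ? "created" : "refused");
  delete sel;
  return 0;
}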
/*
@ -615,9 +580,9 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables,
pfname field "name" in table
error code of error (out)
RETURN VALUES
# created SQL_SELECT
# created SQL_SELECT
*/
SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen,
@ -649,12 +614,10 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen,
int mysqld_help(THD *thd, const char *mask)
{
Protocol *protocol= thd->protocol;
SQL_SELECT *select_topics_by_name= 0, *select_keyword_by_name= 0,
*select_cat_by_name= 0, *select_topics_by_cat= 0, *select_cat_by_cat= 0,
*select_root_cats= 0;
SQL_SELECT *select;
st_find_field used_fields[array_elements(init_used_fields)];
DBUG_ENTER("mysqld_help");
TABLE_LIST tables[4];
bzero((gptr)tables,sizeof(tables));
tables[0].alias= tables[0].real_name= (char*) "help_topic";
@ -670,13 +633,13 @@ int mysqld_help(THD *thd, const char *mask)
tables[3].lock_type= TL_READ;
tables[3].next= 0;
tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql";
List<String> topics_list, categories_list, subcategories_list;
String name, description, example;
int res, count_topics, count_categories, error;
uint mlen= strlen(mask);
MEM_ROOT *mem_root= &thd->mem_root;
if (open_and_lock_tables(thd, tables))
{
res= -1;
@ -684,7 +647,7 @@ int mysqld_help(THD *thd, const char *mask)
}
/* Init tables and fields to be usable from items */
setup_tables(tables);
memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields));
memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields));
if (init_fields(thd, tables, used_fields, array_elements(used_fields)))
{
res= -1;
@ -693,39 +656,55 @@ int mysqld_help(THD *thd, const char *mask)
size_t i;
for (i=0; i<sizeof(tables)/sizeof(TABLE_LIST); i++)
tables[i].table->file->init_table_handle_for_HANDLER();
if (!(select_topics_by_name=
if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[0].table,
used_fields[help_topic_name].field,&error)) ||
!(select_cat_by_name=
prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
used_fields[help_category_name].field,&error))||
!(select_keyword_by_name=
prepare_select_for_name(thd,mask,mlen,tables,tables[3].table,
used_fields[help_keyword_name].field,&error)))
used_fields[help_topic_name].field,&error)))
{
res= -1;
goto end;
}
res= 1;
count_topics= search_topics(thd,tables[0].table,used_fields,
select_topics_by_name,&topics_list,
count_topics= search_topics(thd,tables[0].table,used_fields,
select,&topics_list,
&name, &description, &example);
delete select;
if (count_topics == 0)
count_topics= search_topics_by_keyword(thd,tables[3].table,tables[0].table,
tables[2].table,used_fields,
select_keyword_by_name,&topics_list,
&name,&description,&example);
{
int key_id;
if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[3].table,
used_fields[help_keyword_name].field,&error)))
{
res= -1;
goto end;
}
count_topics=search_keyword(thd,tables[3].table,used_fields,select,&key_id);
delete select;
count_topics= (count_topics != 1) ? 0 :
get_topics_for_keyword(thd,tables[0].table,tables[2].table,
used_fields,key_id,&topics_list,&name,
&description,&example);
}
if (count_topics == 0)
{
int16 category_id;
Field *cat_cat_id= used_fields[help_category_parent_category_id].field;
if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
used_fields[help_category_name].field,&error)))
{
res= -1;
goto end;
}
count_categories= search_categories(thd, tables[1].table, used_fields,
select_cat_by_name,
select,
&categories_list,&category_id);
delete select;
if (!count_categories)
{
if (send_header_2(protocol,FALSE))
@ -746,22 +725,26 @@ int mysqld_help(THD *thd, const char *mask)
Item *cond_cat_by_cat=
new Item_func_equal(new Item_field(cat_cat_id),
new Item_int((int32)category_id));
if (!(select_topics_by_cat= prepare_simple_select(thd,cond_topic_by_cat,
tables,tables[0].table,
&error)) ||
!(select_cat_by_cat=
prepare_simple_select(thd,cond_cat_by_cat,tables,
tables[1].table,&error)))
if (!(select= prepare_simple_select(thd,cond_topic_by_cat,
tables,tables[0].table,&error)))
{
res= -1;
goto end;
}
get_all_items_for_category(thd,tables[0].table,
used_fields[help_topic_name].field,
select_topics_by_cat,&topics_list);
select,&topics_list);
delete select;
if (!(select= prepare_simple_select(thd,cond_cat_by_cat,tables,
tables[1].table,&error)))
{
res= -1;
goto end;
}
get_all_items_for_category(thd,tables[1].table,
used_fields[help_category_name].field,
select_cat_by_cat,&subcategories_list);
select,&subcategories_list);
delete select;
String *cat= categories_list.head();
if (send_header_2(protocol, true) ||
send_variant_2_list(mem_root,protocol,&topics_list, "N",cat) ||
@ -780,30 +763,25 @@ int mysqld_help(THD *thd, const char *mask)
if (send_header_2(protocol, FALSE) ||
send_variant_2_list(mem_root,protocol, &topics_list, "N", 0))
goto end;
search_categories(thd, tables[1].table, used_fields,
select_cat_by_name,&categories_list, 0);
if (!(select=
prepare_select_for_name(thd,mask,mlen,tables,tables[1].table,
used_fields[help_category_name].field,&error)))
{
res= -1;
goto end;
}
search_categories(thd, tables[1].table, used_fields,
select,&categories_list, 0);
delete select;
/* Then send categories */
if (send_variant_2_list(mem_root,protocol, &categories_list, "Y", 0))
goto end;
}
res= 0;
send_eof(thd);
end:
free_select(select_topics_by_name);
free_select(select_keyword_by_name);
free_select(select_cat_by_name);
free_select(select_topics_by_cat);
free_select(select_cat_by_cat);
free_select(select_root_cats);
DBUG_RETURN(res);
}
static void free_select(SQL_SELECT *sel)
{
if (sel)
delete sel->quick;
}


@ -1115,7 +1115,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg)
thd->fatal_error(); // Abort waiting inserts
goto end;
}
if (di->table->file->has_transactions())
if (!(di->table->file->table_flags() & HA_CAN_INSERT_DELAYED))
{
thd->fatal_error();
my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.real_name);


@ -125,7 +125,7 @@ static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
Item *having);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
ulong offset,Item *having);
static int remove_dup_with_hash_index(THD *thd, TABLE *table,
static int remove_dup_with_hash_index(THD *thd,TABLE *table,
uint field_count, Field **first_field,
ulong key_length,Item *having);
static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count);
@ -616,22 +616,8 @@ JOIN::optimize()
}
if (const_tables && !thd->locked_tables &&
!(select_options & SELECT_NO_UNLOCK))
{
TABLE **curr_table, **end;
for (curr_table= table, end=curr_table + const_tables ;
curr_table != end;
curr_table++)
{
/* BDB tables require that we call index_end() before doing an unlock */
if ((*curr_table)->key_read)
{
(*curr_table)->key_read=0;
(*curr_table)->file->extra(HA_EXTRA_NO_KEYREAD);
}
(*curr_table)->file->index_end();
}
mysql_unlock_some_tables(thd, table, const_tables);
}
if (!conds && outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
@ -1543,6 +1529,7 @@ JOIN::cleanup()
}
}
tmp_join->tmp_join= 0;
tmp_table_param.copy_field=0;
DBUG_RETURN(tmp_join->cleanup());
}
@ -3656,7 +3643,6 @@ make_join_readinfo(JOIN *join, uint options)
}
delete tab->quick;
tab->quick=0;
table->file->index_init(tab->ref.key);
tab->read_first_record= join_read_key;
tab->read_record.read_record= join_no_more_records;
if (table->used_keys.is_set(tab->ref.key) &&
@ -3676,7 +3662,6 @@ make_join_readinfo(JOIN *join, uint options)
}
delete tab->quick;
tab->quick=0;
table->file->index_init(tab->ref.key);
if (table->used_keys.is_set(tab->ref.key) &&
!table->no_keyread)
{
@ -3696,7 +3681,6 @@ make_join_readinfo(JOIN *join, uint options)
break;
case JT_FT:
table->status=STATUS_NO_RECORD;
table->file->index_init(tab->ref.key);
tab->read_first_record= join_ft_read_first;
tab->read_record.read_record= join_ft_read_next;
break;
@ -3766,7 +3750,6 @@ make_join_readinfo(JOIN *join, uint options)
!(tab->select && tab->select->quick))
{ // Only read index tree
tab->index=find_shortest_key(table, & table->used_keys);
tab->table->file->index_init(tab->index);
tab->read_first_record= join_read_first;
tab->type=JT_NEXT; // Read with index_first / index_next
}
@ -3840,9 +3823,7 @@ void JOIN_TAB::cleanup()
table->key_read= 0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
/* Don't free index if we are using read_record */
if (!read_record.table)
table->file->index_end();
table->file->ha_index_or_rnd_end();
/*
We need to reset this for next select
(Tested in part_of_refkey)
@ -3868,7 +3849,7 @@ void
JOIN::join_free(bool full)
{
JOIN_TAB *tab,*end;
DBUG_ENTER("join_free");
DBUG_ENTER("JOIN::join_free");
if (table)
{
@ -3881,24 +3862,20 @@ JOIN::join_free(bool full)
free_io_cache(table[const_tables]);
filesort_free_buffers(table[const_tables]);
}
if (!full && select_lex->uncacheable)
{
for (tab= join_tab, end= tab+tables; tab != end; tab++)
{
if (tab->table)
{
/* Don't free index if we are using read_record */
if (!tab->read_record.table)
tab->table->file->index_end();
}
}
}
else
if (full || !select_lex->uncacheable)
{
for (tab= join_tab, end= tab+tables; tab != end; tab++)
tab->cleanup();
table= 0;
}
else
{
for (tab= join_tab, end= tab+tables; tab != end; tab++)
{
if (tab->table && tab->table->file->inited == handler::RND)
tab->table->file->ha_rnd_end();
}
}
}
/*
We are not using tables anymore
@ -4149,12 +4126,6 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
item->no_rows_in_result();
result->send_data(fields);
}
if (tables) // Not from do_select()
{
/* Close open cursors */
for (TABLE_LIST *table=tables; table ; table=table->next)
table->table->file->index_end();
}
result->send_eof(); // Should be safe
}
/* Update results for FOUND_ROWS */
@ -5561,8 +5532,8 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
goto err1;
if (table->file->indexes_are_disabled())
new_table.file->disable_indexes(HA_KEY_SWITCH_ALL);
table->file->index_end();
table->file->rnd_init();
table->file->ha_index_or_rnd_end();
table->file->ha_rnd_init();
if (table->no_rows)
{
new_table.file->extra(HA_EXTRA_NO_ROWS);
@ -5584,7 +5555,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
}
/* remove heap table and change to use myisam table */
(void) table->file->rnd_end();
(void) table->file->ha_rnd_end();
(void) table->file->close();
(void) table->file->delete_table(table->real_name);
delete table->file;
@ -5598,7 +5569,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
err:
DBUG_PRINT("error",("Got error: %d",write_err));
table->file->print_error(error,MYF(0)); // Report the 'table is full' error
(void) table->file->rnd_end();
(void) table->file->ha_rnd_end();
(void) new_table.file->close();
err1:
new_table.file->delete_table(new_table.real_name);
@ -5647,7 +5618,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
{
DBUG_PRINT("info",("Using end_update"));
end_select=end_update;
table->file->index_init(0);
if (!table->file->inited)
table->file->ha_index_init(0);
}
else
{
@ -5725,9 +5697,9 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
my_errno= tmp;
error= -1;
}
if ((tmp=table->file->index_end()))
if ((tmp=table->file->ha_index_or_rnd_end()))
{
DBUG_PRINT("error",("index_end() failed"));
DBUG_PRINT("error",("ha_index_or_rnd_end() failed"));
my_errno= tmp;
error= -1;
}
@ -5994,6 +5966,11 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
if (!table->outer_join || error > 0)
DBUG_RETURN(error);
}
if (table->key_read)
{
table->key_read=0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
}
if (tab->on_expr && !table->null_row)
{
@ -6072,6 +6049,8 @@ join_read_key(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
if (cmp_buffer_with_ref(tab) ||
(table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
{
@ -6097,6 +6076,8 @@ join_read_always_key(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
if (cp_buffer_from_ref(&tab->ref))
return -1;
if ((error=table->file->index_read(table->record[0],
@ -6122,6 +6103,8 @@ join_read_last_key(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
if (cp_buffer_from_ref(&tab->ref))
return -1;
if ((error=table->file->index_read_last(table->record[0],
@ -6230,6 +6213,8 @@ join_read_first(JOIN_TAB *tab)
tab->read_record.file=table->file;
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
table->file->ha_index_init(tab->index);
if ((error=tab->table->file->index_first(tab->table->record[0])))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
@ -6267,6 +6252,8 @@ join_read_last(JOIN_TAB *tab)
tab->read_record.file=table->file;
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
table->file->ha_index_init(tab->index);
if ((error= tab->table->file->index_last(tab->table->record[0])))
return report_error(table, error);
return 0;
@ -6289,6 +6276,8 @@ join_ft_read_first(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
#if NOT_USED_YET
if (cp_buffer_from_ref(&tab->ref)) // as ft-key doesn't use store_key's
return -1; // see also FT_SELECT::init()
@ -6606,7 +6595,6 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (item->maybe_null)
group->buff[-1]=item->null_value ? 1 : 0;
}
// table->file->index_init(0);
if (!table->file->index_read(table->record[1],
join->tmp_table_param.group_buff,0,
HA_READ_KEY_EXACT))
@ -6637,7 +6625,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
/* Change method to update rows */
table->file->index_init(0);
table->file->ha_index_init(0);
join->join_tab[join->tables-1].next_select=end_unique_update;
}
join->send_records++;
@ -7135,10 +7123,10 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
if (tab->ref.key >= 0)
{
tab->ref.key= new_ref_key;
table->file->index_init(new_ref_key);
}
else
{
select->quick->file->ha_index_end();
select->quick->index= new_ref_key;
select->quick->init();
}
@ -7160,7 +7148,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
*/
if (!select->quick->reverse_sorted())
{
if (table->file->index_flags(ref_key) & HA_NOT_READ_PREFIX_LAST)
if (!(table->file->index_flags(ref_key) & HA_READ_PREV))
DBUG_RETURN(0); // Use filesort
// ORDER BY range_key DESC
QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick,
@ -7182,7 +7170,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
Use a traversal function that starts by reading the last row
with key part (A) and then traverse the index backwards.
*/
if (table->file->index_flags(ref_key) & HA_NOT_READ_PREFIX_LAST)
if (!(table->file->index_flags(ref_key) & HA_READ_PREV))
DBUG_RETURN(0); // Use filesort
tab->read_first_record= join_read_last_key;
tab->read_record.read_record= join_read_prev_same;
@ -7236,7 +7224,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
tab->index=nr;
tab->read_first_record= (flag > 0 ? join_read_first:
join_read_last);
table->file->index_init(nr);
tab->type=JT_NEXT; // Read with index_first(), index_next()
if (table->used_keys.is_set(nr))
{
@ -7497,7 +7484,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
org_record=(char*) (record=table->record[0])+offset;
new_record=(char*) table->record[1]+offset;
file->rnd_init();
file->ha_rnd_init();
error=file->rnd_next(record);
for (;;)
{
@ -7609,7 +7596,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
(*field_length++)= (*ptr)->pack_length();
}
file->rnd_init();
file->ha_rnd_init();
key_pos=key_buffer;
for (;;)
{
@ -7655,14 +7642,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
my_free((char*) key_buffer,MYF(0));
hash_free(&hash);
file->extra(HA_EXTRA_NO_CACHE);
(void) file->rnd_end();
(void) file->ha_rnd_end();
DBUG_RETURN(0);
err:
my_free((char*) key_buffer,MYF(0));
hash_free(&hash);
file->extra(HA_EXTRA_NO_CACHE);
(void) file->rnd_end();
(void) file->ha_rnd_end();
if (error)
file->print_error(error,MYF(0));
DBUG_RETURN(1);
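Aside, for illustration only: the recurring change in this file replaces direct index_init()/rnd_init()/index_end()/rnd_end() calls with the new ha_* wrappers, which remember in handler::inited whether an index or a table scan is open. That lets callers initialize lazily ("if (!file->inited) file->ha_index_init(key)") and close whatever is open with a single ha_index_or_rnd_end(). A self-contained sketch of that bookkeeping, with assumed semantics and illustrative names rather than the real handler class:

#include <cassert>

class handler_sketch
{
public:
  enum { NONE, INDEX, RND } inited;        // which kind of scan, if any, is open
  handler_sketch() : inited(NONE) {}

  int ha_index_init(unsigned idx) { assert(inited == NONE); inited= INDEX; return index_init(idx); }
  int ha_rnd_init(bool scan)      { assert(inited == NONE); inited= RND;   return rnd_init(scan); }
  int ha_index_end()              { assert(inited == INDEX); inited= NONE; return index_end(); }
  int ha_rnd_end()                { assert(inited == RND);   inited= NONE; return rnd_end(); }

  int ha_index_or_rnd_end()                // close whichever scan is open, if any
  {
    if (inited == INDEX) return ha_index_end();
    if (inited == RND)   return ha_rnd_end();
    return 0;
  }

private:
  // Stand-ins for the engine-specific virtual methods.
  int index_init(unsigned) { return 0; }
  int rnd_init(bool)       { return 0; }
  int index_end()          { return 0; }
  int rnd_end()            { return 0; }
};

int main()
{
  handler_sketch h;
  if (!h.inited)                           // lazy initialization, as in join_read_key()
    h.ha_index_init(0);
  h.ha_index_or_rnd_end();                 // safe regardless of which scan was started
  return 0;
}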


@ -531,7 +531,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
break;
case FIELD_TYPE_GEOMETRY:
#ifdef HAVE_SPATIAL
if (!(file->table_flags() & HA_HAS_GEOMETRY))
if (!(file->table_flags() & HA_CAN_GEOMETRY))
{
my_printf_error(ER_CHECK_NOT_IMPLEMENTED, ER(ER_CHECK_NOT_IMPLEMENTED),
MYF(0), "GEOMETRY");
@ -669,7 +669,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
continue;
}
(*key_count)++;
tmp=max(file->max_key_parts(),MAX_REF_PARTS);
tmp=file->max_key_parts();
if (key->columns.elements > tmp)
{
my_error(ER_TOO_MANY_KEY_PARTS,MYF(0),tmp);
@ -721,7 +721,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
DBUG_RETURN(-1);
}
}
tmp=min(file->max_keys(), MAX_KEY);
tmp=file->max_keys();
if (*key_count > tmp)
{
my_error(ER_TOO_MANY_KEYS,MYF(0),tmp);
@ -881,7 +881,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (f_is_blob(sql_field->pack_flag))
{
if (!(file->table_flags() & HA_BLOB_KEY))
if (!(file->table_flags() & HA_CAN_INDEX_BLOBS))
{
my_printf_error(ER_BLOB_USED_AS_KEY,ER(ER_BLOB_USED_AS_KEY),MYF(0),
column->field_name);
@ -918,7 +918,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
}
else
key_info->flags|= HA_NULL_PART_KEY;
if (!(file->table_flags() & HA_NULL_KEY))
if (!(file->table_flags() & HA_NULL_IN_KEY))
{
my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX),
MYF(0),column->field_name);
@ -1050,7 +1050,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
if (!(key_info->flags & HA_NULL_PART_KEY))
unique_key=1;
key_info->key_length=(uint16) key_length;
uint max_key_length= min(file->max_key_length(), MAX_KEY_LENGTH);
uint max_key_length= file->max_key_length();
if (key_length > max_key_length && key->type != Key::FULLTEXT)
{
my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length);
@ -1142,12 +1142,21 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
alias= table_case_name(create_info, table_name);
file=get_new_handler((TABLE*) 0, create_info->db_type);
#ifdef NOT_USED
/*
If there is a technical reason for a handler not to support
temporary tables, this code can be re-enabled.
Otherwise, a handler author who wishes to prohibit the use of
temporary tables for his handler should implement a check in
the ::create() method.
*/
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
(file->table_flags() & HA_NO_TEMP_TABLES))
{
my_error(ER_ILLEGAL_HA,MYF(0),table_name);
DBUG_RETURN(-1);
}
#endif
if (mysql_prepare_table(thd, create_info, fields,
keys, tmp_table, db_options, file,
@ -3461,7 +3470,7 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
current query id */
t->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (t->file->rnd_init(1))
if (t->file->ha_rnd_init(1))
protocol->store_null();
else
{
@ -3489,6 +3498,7 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
crc+= row_crc;
}
protocol->store((ulonglong)crc);
t->file->ha_rnd_end();
}
}
thd->clear_error();
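Aside, for illustration only: the NOT_USED comment added to mysql_create_table() above says that an engine wanting to refuse TEMPORARY tables should check for that in its own ::create() method instead of relying on a global HA_NO_TEMP_TABLES flag. A hypothetical, self-contained sketch of such a check; the names, flag value, and error handling are illustrative, not the real handler API:

#include <cstdio>

enum { CREATE_TMP_TABLE_FLAG= 1 };         // stand-in for HA_LEX_CREATE_TMP_TABLE

struct create_info_sketch                  // stand-in for HA_CREATE_INFO
{
  unsigned options;
};

// Returns 0 on success, non-zero to refuse the CREATE, as ::create() would.
static int example_engine_create(const char *name, const create_info_sketch *info)
{
  if (info->options & CREATE_TMP_TABLE_FLAG)
  {
    std::fprintf(stderr, "engine refuses TEMPORARY table: %s\n", name);
    return 1;                              // stand-in for ER_ILLEGAL_HA
  }
  return 0;                                // proceed with table creation
}

int main()
{
  create_info_sketch info= { CREATE_TMP_TABLE_FLAG };
  return example_engine_create("./test/t1", &info) ? 1 : 0;
}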


@ -319,6 +319,7 @@ int mysql_update(THD *thd,
error= 1; // Aborted
end_read_record(&info);
free_io_cache(table); // If ORDER BY
delete select;
thd->proc_info="end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
@ -358,7 +359,6 @@ int mysql_update(THD *thd,
thd->lock=0;
}
delete select;
free_underlaid_joins(thd, &thd->lex->select_lex);
if (error >= 0)
send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); /* purecov: inspected */
@ -964,25 +964,24 @@ int multi_update::do_updates(bool from_send_error)
TABLE_LIST *cur_table;
int local_error;
ha_rows org_updated;
TABLE *table;
TABLE *table, *tmp_table;
DBUG_ENTER("do_updates");
do_update= 0; // Don't retry this function
do_update= 0; // Don't retry this function
if (!found)
DBUG_RETURN(0);
for (cur_table= update_tables; cur_table ; cur_table= cur_table->next)
{
byte *ref_pos;
TABLE *tmp_table;
table = cur_table->table;
if (table == table_to_update)
continue; // Already updated
org_updated= updated;
tmp_table= tmp_tables[cur_table->shared];
tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
(void) table->file->rnd_init(0);
(void) table->file->ha_rnd_init(0);
table->file->extra(HA_EXTRA_NO_CACHE);
/*
@ -998,7 +997,7 @@ int multi_update::do_updates(bool from_send_error)
}
copy_field_end=copy_field_ptr;
if ((local_error = tmp_table->file->rnd_init(1)))
if ((local_error = tmp_table->file->ha_rnd_init(1)))
goto err;
ref_pos= (byte*) tmp_table->field[0]->ptr;
@ -1049,7 +1048,8 @@ int multi_update::do_updates(bool from_send_error)
else
trans_safe= 0; // Can't do safe rollback
}
(void) table->file->rnd_end();
(void) table->file->ha_rnd_end();
(void) tmp_table->file->ha_rnd_end();
}
DBUG_RETURN(0);
@ -1057,6 +1057,9 @@ err:
if (!from_send_error)
table->file->print_error(local_error,MYF(0));
(void) table->file->ha_rnd_end();
(void) tmp_table->file->ha_rnd_end();
if (updated != org_updated)
{
if (table->tmp_table != NO_TMP_TABLE)


@ -255,7 +255,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token IDENT_QUOTED
%token IGNORE_SYM
%token IMPORT
%token INDEX
%token INDEX_SYM
%token INDEXES
%token INFILE
%token INNER_SYM
@ -1028,7 +1028,7 @@ create:
}
create2
{ Lex->current_select= &Lex->select_lex; }
| CREATE opt_unique_or_fulltext INDEX ident key_alg ON table_ident
| CREATE opt_unique_or_fulltext INDEX_SYM ident key_alg ON table_ident
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_CREATE_INDEX;
@ -1212,7 +1212,7 @@ create_table_option:
| INSERT_METHOD opt_equal merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;}
| DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
{ Lex->create_info.data_file_name= $4.str; }
| INDEX DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; };
| INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; };
storage_engines:
ident_or_text
@ -1733,7 +1733,7 @@ constraint_key_type:
key_or_index:
KEY_SYM {}
| INDEX {};
| INDEX_SYM {};
opt_key_or_index:
/* empty */ {}
@ -1742,7 +1742,7 @@ opt_key_or_index:
keys_or_index:
KEYS {}
| INDEX {}
| INDEX_SYM {}
| INDEXES {};
opt_unique_or_fulltext:
@ -2221,7 +2221,7 @@ table_to_table:
};
keycache:
CACHE_SYM INDEX keycache_list IN_SYM key_cache_name
CACHE_SYM INDEX_SYM keycache_list IN_SYM key_cache_name
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_ASSIGN_TO_KEYCACHE;
@ -2252,7 +2252,7 @@ key_cache_name:
;
preload:
LOAD INDEX INTO CACHE_SYM
LOAD INDEX_SYM INTO CACHE_SYM
{
LEX *lex=Lex;
lex->sql_command=SQLCOM_PRELOAD_KEYS;
@ -3859,7 +3859,7 @@ drop:
lex->drop_temporary= $2;
lex->drop_if_exists= $4;
}
| DROP INDEX ident ON table_ident {}
| DROP INDEX_SYM ident ON table_ident {}
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_DROP_INDEX;
@ -5527,7 +5527,7 @@ grant_privilege:
| REFERENCES { Lex->which_columns = REFERENCES_ACL;} opt_column_list {}
| DELETE_SYM { Lex->grant |= DELETE_ACL;}
| USAGE {}
| INDEX { Lex->grant |= INDEX_ACL;}
| INDEX_SYM { Lex->grant |= INDEX_ACL;}
| ALTER { Lex->grant |= ALTER_ACL;}
| CREATE { Lex->grant |= CREATE_ACL;}
| DROP { Lex->grant |= DROP_ACL;}


@ -494,15 +494,13 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag,
for (uint key=0 ; key < outparam->keys ; key++,keyinfo++)
{
uint usable_parts=0;
ulong index_flags;
keyinfo->name=(char*) outparam->keynames.type_names[key];
/* Fix fulltext keys for old .frm files */
if (outparam->key_info[key].flags & HA_FULLTEXT)
outparam->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
/* This has to be done after the above fulltext correction */
index_flags=outparam->file->index_flags(key);
if (!(index_flags & HA_KEY_READ_ONLY))
if (!(outparam->file->index_flags(key) & HA_KEYREAD_ONLY))
{
outparam->read_only_keys.set_bit(key);
outparam->keys_for_keyread.clear_bit(key);
@ -577,15 +575,9 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag,
if (field->key_length() == key_part->length &&
!(field->flags & BLOB_FLAG))
{
if ((index_flags & HA_KEY_READ_ONLY) &&
(field->key_type() != HA_KEYTYPE_TEXT ||
(!((ha_option & HA_KEY_READ_WRONG_STR) ||
(field->flags & BINARY_FLAG)) &&
!(keyinfo->flags & HA_FULLTEXT))))
if (outparam->file->index_flags(key, i) & HA_KEYREAD_ONLY)
field->part_of_key.set_bit(key);
if ((field->key_type() != HA_KEYTYPE_TEXT ||
!(keyinfo->flags & HA_FULLTEXT)) &&
!(index_flags & HA_WRONG_ASCII_ORDER))
if (outparam->file->index_flags(key, i) & HA_READ_ORDER)
field->part_of_sortkey.set_bit(key);
}
if (!(key_part->key_part_flag & HA_REVERSE_SORT) &&

File diff suppressed because it is too large


@ -28,7 +28,6 @@
#include "mysql_priv.h"
#include <m_ctype.h>
#define FCOMP 17 /* Bytes for a packed field */
#define FCOMP 17 /* Bytes for a packed field */
static uchar * pack_screens(List<create_field> &create_fields,