/* Copyright (C) 2003 MySQL AB

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation        // gcc: Class implementation
#endif

#include "mysql_priv.h"
#include "ha_archive.h"
#include <my_dir.h>

/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.
  This example was written as a test case for a customer who needed
  a storage engine without indexes that could compress data very well.
  So, welcome to a completely compressed storage engine. This storage
  engine only does inserts. No replace, deletes, or updates. All reads are
  complete table scans. Compression is done through azip (bzip compresses
  better, but only marginally; if someone asks I could add support for
  it too, but be aware that it costs a lot more in CPU time than azip).

  We keep a file pointer open for each instance of ha_archive for each read
  but for writes we keep one open file handle just for that. We flush it
  only when a read occurs. azip handles compressing lots of records
  at once much better than doing lots of little records between writes.
  It is possible to not lock on writes, but this would then mean we couldn't
  handle bulk inserts as well (that is, if someone was trying to read at
  the same time, since we would want to flush).

  A "meta" file is kept alongside the data file. This file serves two
  purposes. The first is to track the number of rows in the table. The
  second is to determine if the table was closed properly or not. When the
  meta file is first opened it is marked as dirty. It is opened when the table
  itself is opened for writing. When the table is closed the new count for rows
  is written to the meta file and the file is marked as clean. If the meta file
  is opened and it is marked as dirty, it is assumed that a crash occurred. At
  this point an error occurs and the user is told to rebuild the file.
  A rebuild scans the rows and rewrites the meta file. If corruption is found
  in the data file then the meta file is not repaired.
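
  The lifecycle of the dirty flag, as a rough outline (illustration only,
  not literal code):

    open for write:   read meta; if already dirty, assume a crash and tell
                      the user to rebuild; otherwise mark the file dirty
    clean close:      write the current row count and mark the file clean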

  At some point a recovery method for such a drastic case needs to be devised.

  Locks are row level, and you will get a consistent read.

  As far as table scans go, performance is quite fast. I don't have
  good numbers but locally it has outperformed both InnoDB and MyISAM. For
  InnoDB the question will be if the table can fit into the buffer
  pool. For MyISAM it's a question of how much the file system caches the
  MyISAM file. With enough free memory MyISAM is faster. It's only when the OS
  doesn't have enough memory to cache the entire table that archive turns out
  to be any faster. For writes it is always a bit slower than MyISAM, but it
  has no internal limits on row length.

  Example file sizes for MyISAM (packed) versus Archive:

  Table with 76695844 identical rows:
  29680807 a_archive.ARZ
  920350317 a.MYD


  Table with 8991478 rows (all of Slashdot's comments):
  1922964506 comment_archive.ARZ
  2944970297 comment_text.MYD
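
  (That works out to roughly a 31:1 size reduction for the highly repetitive
  table, and about a 35% saving on the real-world comment text.)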

  TODO:
   Add optional bzip support.
   Allow users to set the compression level.
   Add a truncate table command.
   Implement versioning, should be easy.
   Allow for errors, find a way to mark bad rows.
   Talk to the azip guys, come up with a writable format so that updates
   are doable without switching to a block method.
   Add an optional feature so that rows can be flushed at intervals (which
   will cause less compression but may speed up ordered searches).
   Checkpoint the meta file to allow for faster rebuilds.
   Dirty open (right now the meta file is repaired if a crash occurred).
   Option to allow for dirty reads; this would lower the sync calls, which
   would make inserts a lot faster, but would mean highly arbitrary reads.

   -Brian
*/

/*
  Notes on file formats.
  The Meta file is laid out as:
  check - Just an int of 254 to make sure that the file we are opening was
  never corrupted.
  version - The current version of the file format.
  rows - This is an unsigned long long which is the number of rows in the data
  file.
  check point - Reserved for future use.
  dirty - Status of the file, whether or not its values are the latest. This
  flag is what causes a repair to occur.

  The data file:
  check - Just an int of 254 to make sure that the file we are opening was
  never corrupted.
  version - The current version of the file format.
  data - The data is stored in a "row + blobs" format.
*/

/* Whether the archive storage engine has been initialized */
static bool archive_inited= FALSE;
/* Variables for archive share methods */
pthread_mutex_t archive_mutex;
static HASH archive_open_tables;

/* The file extensions */
#define ARZ ".ARZ"               // The data file
#define ARN ".ARN"               // Files used during an optimize call
#define ARM ".ARM"               // Meta file
/*
  uchar + uchar + ulonglong + ulonglong + uchar
*/
#define META_BUFFER_SIZE 19      // Size of the data used in the meta file
/*
  uchar + uchar
*/
#define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
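
/*
  A minimal sketch (illustration only, not code the engine runs; version,
  rows, check_point and dirty are placeholder variables) of how the
  META_BUFFER_SIZE bytes line up with the meta file layout described above,
  assuming the mysys int8store() macro for the ulonglong fields:

    uchar meta[META_BUFFER_SIZE];
    meta[0]= (uchar) ARCHIVE_CHECK_HEADER;  // check byte
    meta[1]= (uchar) version;               // file format version
    int8store(meta + 2, rows);              // ulonglong row count
    int8store(meta + 10, check_point);      // ulonglong, reserved
    meta[18]= (uchar) dirty;                // dirty flag
    // 1 + 1 + 8 + 8 + 1 == 19 == META_BUFFER_SIZE
*/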

/* Static declarations for handlerton */
static handler *archive_create_handler(TABLE_SHARE *table);


/* dummy handlerton - only to have something to return from archive_db_init */
handlerton archive_hton = {
  MYSQL_HANDLERTON_INTERFACE_VERSION,
  "ARCHIVE",
  SHOW_OPTION_YES,
  "Archive storage engine",
  DB_TYPE_ARCHIVE_DB,
  archive_db_init,
  0,       /* slot */
  0,       /* savepoint size. */
  NULL,    /* close_connection */
  NULL,    /* savepoint */
  NULL,    /* rollback to savepoint */
  NULL,    /* release savepoint */
  NULL,    /* commit */
  NULL,    /* rollback */
  NULL,    /* prepare */
  NULL,    /* recover */
  NULL,    /* commit_by_xid */
  NULL,    /* rollback_by_xid */
  NULL,    /* create_cursor_read_view */
  NULL,    /* set_cursor_read_view */
  NULL,    /* close_cursor_read_view */
  archive_create_handler,    /* Create a new handler */
  NULL,    /* Drop a database */
  archive_db_end,    /* Panic call */
  NULL,    /* Start Consistent Snapshot */
  NULL,    /* Flush logs */
  NULL,    /* Show status */
  HTON_NO_FLAGS
};
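
/*
  Of the slots above, only archive_db_init, archive_create_handler and
  archive_db_end are wired up; the transactional and cursor callbacks stay
  NULL because this engine has nothing to commit, roll back, or snapshot.
*/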

static handler *archive_create_handler(TABLE_SHARE *table)
{
  return new ha_archive(table);
}

/*
  Used for hash table that tracks open tables.
*/
static byte* archive_get_key(ARCHIVE_SHARE *share, uint *length,
                             my_bool not_used __attribute__((unused)))
{
  *length= share->table_name_length;
  return (byte*) share->table_name;
}
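
/*
  Note (illustrative, following the usual mysys HASH convention): hash_init()
  in archive_db_init() below registers archive_get_key() as the hash's
  key-reading callback, so lookups on archive_open_tables use the table name
  bytes it returns to identify each ARCHIVE_SHARE.
*/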

/*
  Initialize the archive handler.

  SYNOPSIS
    archive_db_init()
    void

  RETURN
    FALSE       OK
    TRUE        Error
*/

bool archive_db_init()
{
  DBUG_ENTER("archive_db_init");
  if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST))
    goto error;
  if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
                (hash_get_key) archive_get_key, 0, 0))
  {
    VOID(pthread_mutex_destroy(&archive_mutex));
  }
  else
  {
    archive_inited= TRUE;
    DBUG_RETURN(FALSE);
  }
error:
  have_archive_db= SHOW_OPTION_DISABLED;  // If we couldn't use handler
  DBUG_RETURN(TRUE);
}

/*
  Release the archive handler.

  SYNOPSIS
    archive_db_end(ha_panic_function type)

  RETURN
    0           OK
*/

int archive_db_end(ha_panic_function type)
{
  if (archive_inited)
  {
    hash_free(&archive_open_tables);
    VOID(pthread_mutex_destroy(&archive_mutex));
  }
  archive_inited= FALSE;
  return 0;
}
|
|
|
|
|
Table definition cache, part 2
The table opening process now works the following way:
- Create common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
object and open a handler to the table object
Other noteworthy changes:
- In TABLE_SHARE the most common strings are now LEX_STRING's
- Better error message when table is not found
- Variable table_cache is now renamed 'table_open_cache'
- New variable 'table_definition_cache' that is the number of table defintions that will be cached
- strxnmov() calls are now fixed to avoid overflows
- strxnmov() will now always add one end \0 to result
- engine objects are now created with a TABLE_SHARE object instead of a TABLE object.
- After creating a field object one must call field->init(table) before using it
- For a busy system this change will give you:
- Less memory usage for table object
- Faster opening of tables (if it's has been in use or is in table definition cache)
- Allow you to cache many table definitions objects
- Faster drop of table
mysql-test/mysql-test-run.sh:
Fixed some problems with --gdb option
Test both with socket and tcp/ip port that all old servers are killed
mysql-test/r/flush_table.result:
More tests with lock table with 2 threads + flush table
mysql-test/r/information_schema.result:
Removed old (now wrong) result
mysql-test/r/innodb.result:
Better error messages (thanks to TDC patch)
mysql-test/r/merge.result:
Extra flush table test
mysql-test/r/ndb_bitfield.result:
Better error messages (thanks to TDC patch)
mysql-test/r/ndb_partition_error.result:
Better error messages (thanks to TDC patch)
mysql-test/r/query_cache.result:
Remove tables left from old tests
mysql-test/r/temp_table.result:
Test truncate with temporary tables
mysql-test/r/variables.result:
Table_cache -> Table_open_cache
mysql-test/t/flush_table.test:
More tests with lock table with 2 threads + flush table
mysql-test/t/merge.test:
Extra flush table test
mysql-test/t/multi_update.test:
Added 'sleep' to make test predictable
mysql-test/t/query_cache.test:
Remove tables left from old tests
mysql-test/t/temp_table.test:
Test truncate with temporary tables
mysql-test/t/variables.test:
Table_cache -> Table_open_cache
mysql-test/valgrind.supp:
Remove warning that may happens becasue threads dies in different order
mysys/hash.c:
Fixed wrong DBUG_PRINT
mysys/mf_dirname.c:
More DBUG
mysys/mf_pack.c:
Better comment
mysys/mf_tempdir.c:
More DBUG
Ensure that we call cleanup_dirname() on all temporary directory paths.
If we don't do this, we will get a failure when comparing temporary table
names as in some cases the temporary table name is run through convert_dirname())
mysys/my_alloc.c:
Indentation fix
sql/examples/ha_example.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_example.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/field.cc:
Update for table definition cache:
- Field creation now takes TABLE_SHARE instead of TABLE as argument
(This is becasue field definitions are now cached in TABLE_SHARE)
When a field is created, one now must call field->init(TABLE) before using it
- Use s->db instead of s->table_cache_key
- Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
- make_field() takes TABLE_SHARE as argument instead of TABLE
- move_field() -> move_field_offset()
sql/field.h:
Update for table definition cache:
- Field creation now takes TABLE_SHARE instead of TABLE as argument
(This is becasue field definitions are now cached in TABLE_SHARE)
When a field is created, one now must call field->init(TABLE) before using it
- Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
- make_field() takes TABLE_SHARE as argument instead of TABLE
- move_field() -> move_field_offset()
sql/ha_archive.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_archive.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_berkeley.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
Changed name of argument create() to not hide internal 'table' variable.
table->s -> table_share
sql/ha_berkeley.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_federated.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
Fixed comments
Remove index variable and replace with pointers (simple optimization)
move_field() -> move_field_offset()
Removed some strlen() calls
sql/ha_federated.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_heap.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
Simplify delete_table() and create() as the given file names are now without extension
sql/ha_heap.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisam.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
Remove not needed fn_format()
Fixed for new table->s structure
sql/ha_myisam.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisammrg.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
Don't set 'is_view' for MERGE tables
Use new interface to find_temporary_table()
sql/ha_myisammrg.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
Added flag HA_NO_COPY_ON_ALTER
sql/ha_ndbcluster.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
Fixed wrong calls to strxnmov()
Give error HA_ERR_TABLE_DEF_CHANGED if table definition has changed
drop_table -> intern_drop_table()
table->s -> table_share
Move part_info to TABLE
Fixed comments & DBUG print's
New arguments to print_error()
sql/ha_ndbcluster.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_partition.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
We can't set up or use part_info when creating handler as there is not yet any table object
New ha_intialise() to work with TDC (Done by Mikael)
sql/ha_partition.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
Got set_part_info() from Mikael
sql/handler.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
ha_delete_table() now also takes database as an argument
handler::ha_open() now takes TABLE as argument
ha_open() now calls ha_allocate_read_write_set()
Simplify ha_allocate_read_write_set()
Remove ha_deallocate_read_write_set()
Use table_share (Cached by table definition cache)
sql/handler.h:
New table flag: HA_NO_COPY_ON_ALTER (used by merge tables)
Remove ha_deallocate_read_write_set()
get_new_handler() now takes TABLE_SHARE as argument
ha_delete_table() now gets database as argument
sql/item.cc:
table_name and db are now LEX_STRING objects
When creating fields, we have now have to call field->init(table)
move_field -> move_field_offset()
sql/item.h:
tmp_table_field_from_field_type() now takes an extra paramenter 'fixed_length' to allow one to force usage of CHAR
instead of BLOB
sql/item_cmpfunc.cc:
Fixed call to tmp_table_field_from_field_type()
sql/item_create.cc:
Assert if new not handled cast type
sql/item_func.cc:
When creating fields, we have now have to call field->init(table)
dummy_table used by 'sp' now needs a TABLE_SHARE object
sql/item_subselect.cc:
Trivial code cleanups
sql/item_sum.cc:
When creating fields, we have now have to call field->init(table)
sql/item_timefunc.cc:
Item_func_str_to_date::tmp_table_field() now replaced by call to
tmp_table_field_from_field_type() (see item_timefunc.h)
sql/item_timefunc.h:
Simply tmp_table_field()
sql/item_uniq.cc:
When creating fields, we have now have to call field->init(table)
sql/key.cc:
Added 'KEY' argument to 'find_ref_key' to simplify code
sql/lock.cc:
More debugging
Use create_table_def_key() to create key for table cache
Allocate TABLE_SHARE properly when creating name lock
Fix that locked_table_name doesn't test same table twice
sql/mysql_priv.h:
New functions for table definition cache
New interfaces to a lot of functions.
New faster interface to find_temporary_table() and close_temporary_table()
sql/mysqld.cc:
Added support for table definition cache of size 'table_def_size'
Fixed som calls to strnmov()
Changed name of 'table_cache' to 'table_open_cache'
sql/opt_range.cc:
Use new interfaces
Fixed warnings from valgrind
sql/parse_file.cc:
Safer calls to strxnmov()
Fixed typo
sql/set_var.cc:
Added variable 'table_definition_cache'
Variable table_cache renamed to 'table_open_cache'
sql/slave.cc:
Use new interface
sql/sp.cc:
Proper use of TABLE_SHARE
sql/sp_head.cc:
Remove compiler warnings
We have now to call field->init(table)
sql/sp_head.h:
Pointers to parsed strings are now const
sql/sql_acl.cc:
table_name is now a LEX_STRING
sql/sql_base.cc:
Main implementation of table definition cache
(The #ifdef's are there for the future when table definition cache will replace open table cache)
Now table definitions are cached indepndent of open tables, which will speed up things when a table is in use at once from several places
Views are not yet cached; For the moment we only cache if a table is a view or not.
Faster implementation of find_temorary_table()
Replace 'wait_for_refresh()' with the more general function 'wait_for_condition()'
Drop table is slightly faster as we can use the table definition cache to know the type of the table
sql/sql_cache.cc:
table_cache_key and table_name are now LEX_STRING
'sDBUG print fixes
sql/sql_class.cc:
table_cache_key is now a LEX_STRING
safer strxnmov()
sql/sql_class.h:
Added number of open table shares (table definitions)
sql/sql_db.cc:
safer strxnmov()
sql/sql_delete.cc:
Use new interface to find_temporary_table()
sql/sql_derived.cc:
table_name is now a LEX_STRING
sql/sql_handler.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_insert.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_lex.cc:
Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_lex.h:
Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_load.cc:
Safer strxnmov()
sql/sql_parse.cc:
Better error if wrong DB name
sql/sql_partition.cc:
part_info moved to TABLE from TABLE_SHARE
Indentation changes
sql/sql_select.cc:
Indentation fixes
Call field->init(TABLE) for new created fields
Update create_tmp_table() to use TABLE_SHARE properly
sql/sql_select.h:
Call field->init(TABLE) for new created fields
sql/sql_show.cc:
table_name is now a LEX_STRING
part_info moved to TABLE
sql/sql_table.cc:
Use table definition cache to speed up delete of tables
Fixed calls to functions with new interfaces
Don't use 'share_not_to_be_used'
Instead of doing openfrm() when doing repair, we now have to call
get_table_share() followed by open_table_from_share().
Replace some fn_format() with faster unpack_filename().
Safer strxnmov()
part_info is now in TABLE
Added Mikaels patch for partition and ALTER TABLE
Instead of using 'TABLE_SHARE->is_view' use 'table_flags() & HA_NO_COPY_ON_ALTER
sql/sql_test.cc:
table_name and table_cache_key are now LEX_STRING's
sql/sql_trigger.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
safer strxnmov()
Removed compiler warnings
sql/sql_update.cc:
Call field->init(TABLE) after field is created
sql/sql_view.cc:
safer strxnmov()
Create common TABLE_SHARE object for views to allow us to cache if table is a view
sql/structs.h:
Added SHOW_TABLE_DEFINITIONS
sql/table.cc:
Creation and destruct of TABLE_SHARE objects that are common for many TABLE objects
The table opening process now works the following way:
- Create common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
object and open a handler to the table object
open_table_def() is written in such a way that it should be trivial to add parsing of .frm files in new formats
sql/table.h:
TABLE objects for the same database table now share a common TABLE_SHARE object
In TABLE_SHARE the most common strings are now LEX_STRING's
sql/unireg.cc:
Changed arguments to rea_create_table() to have same order as other functions
Call field->init(table) for new created fields
sql/unireg.h:
Added OPEN_VIEW
strings/strxnmov.c:
Change strxnmov() to always add end \0
This makes usage of strxnmov() safer as most of MySQL code assumes that strxnmov() will create a null terminated string
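As a hypothetical illustration (this call is not from the patch), the
difference shows up when the destination is too small for all the parts:

    char buf[8];
    /* Before: on truncation buf could be left without a closing \0.
       After: the result is always null-terminated, even when truncated. */
    strxnmov(buf, sizeof(buf) - 1, "some", "long", "parts", NullS);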
2005-11-23 21:45:02 +01:00
A fix and a test case for Bug#10760 and complementary cleanups.
The idea of the patch
is that every cursor gets its own lock id for table level locking.
Thus cursors are protected from updates performed within the same
connection. Additionally a list of transient (must be closed at
commit) cursors is maintained and all transient cursors are closed
when necessary. Lastly, this patch adds support for deadlock
timeouts to table-level locking when using cursors.
+ post-review fixes.
include/thr_lock.h:
- add a notion of lock owner to table level locking. When using
cursors, lock owner cannot be identified by a thread id any more,
as we must protect cursors from updates issued within the same
connection (thread). So, each cursor has its own lock identifier to
use with table level locking.
- extend return values of thr_lock and thr_multi_lock with
THR_LOCK_TIMEOUT and THR_LOCK_DEADLOCK, since these conditions
are now possible (see comments to thr_lock.c)
mysys/thr_lock.c:
Better support for cursors:
- use THR_LOCK_OWNER * as lock identifier, not pthread_t.
- check and return an error for a trivial deadlock case, when an
update statement is issued to a table locked by a cursor which has
been previously opened in the same connection.
- add support for locking timeouts: with use of cursors, trivial
deadlocks can occur. For now the only remedy is the lock wait timeout,
which is initialized from a new global variable 'table_lock_wait_timeout'
Example of a deadlock (assuming the storage engine does not downgrade
locks):
con1: open cursor for select * from t1;
con2: open cursor for select * from t2;
con1: update t2 set id=id*2; -- blocked
con2: update t1 set id=id*2; -- deadlock
Lock timeouts are active only if a connection is using cursors.
- the check in the wait_for_lock loop has been changed from
data->cond != cond to data->cond != 0. data->cond is zeroed
in every place it's changed.
- added comments
sql/examples/ha_archive.cc:
- extend the handlerton with the info about cursor behaviour at commit.
sql/examples/ha_archive.h:
- ctor moved to .cc to make use of archive handlerton
sql/examples/ha_example.cc:
- add handlerton instance, init handler::ht with it
sql/examples/ha_example.h:
- ctor moved to .cc to make use of ha_example handlerton
sql/examples/ha_tina.cc:
- add handlerton instance, init handler::ht with it
sql/examples/ha_tina.h:
- ctor moved to .cc to make use of CSV handlerton
sql/ha_berkeley.cc:
- init handlerton::flags and handler::ht
sql/ha_berkeley.h:
- ctor moved to .cc to make use of BerkeleyDB handlerton
sql/ha_blackhole.cc:
- add handlerton instance, init handler::ht with it
sql/ha_blackhole.h:
- ctor moved to .cc to make use of blackhole handlerton
sql/ha_federated.cc:
- add handlerton instance, init handler::ht with it
sql/ha_federated.h:
- ctor moved to .cc to make use of federated handlerton
sql/ha_heap.cc:
- add handlerton instance, init handler::ht with it
sql/ha_heap.h:
- ctor moved to .cc to make use of ha_heap handlerton
sql/ha_innodb.cc:
- init handlerton::flags and handler::ht of innobase storage engine
sql/ha_innodb.h:
- ctor moved to .cc to make use of archive handlerton
sql/ha_myisam.cc:
- add handlerton instance, init handler::ht with it
sql/ha_myisam.h:
- ctor moved to .cc to make use of MyISAM handlerton
sql/ha_myisammrg.cc:
- init handler::ht in the ctor
sql/ha_myisammrg.h:
- ctor moved to .cc to make use of MyISAM MERGE handlerton
sql/ha_ndbcluster.cc:
- init handlerton::flags and handler::ht
sql/handler.cc:
- drop support for ISAM storage engine, which was removed from 5.0
- close all "transient" cursors at COMMIT/ROLLBACK. A "transient"
SQL level cursor is a cursor that uses tables that have a transaction-
specific state.
sql/handler.h:
- extend struct handlerton with flags, add handlerton *ht to every
handler instance.
sql/lock.cc:
- extend mysql_lock_tables to send error to the client if
thr_multi_lock returns a timeout or a deadlock error.
sql/mysqld.cc:
- add server option --table_lock_wait_timeout (in seconds)
sql/set_var.cc:
- add new global variable 'table_lock_wait_timeout' to specify
a wait timeout for table-level locks of MySQL (in seconds). The default
timeout is 50 seconds. The timeout is active only if the connection
has open cursors.
sql/sql_class.cc:
- implement Statement_map::close_transient_cursors
- safety suggests that we need an assert ensuring llock_info->n_cursors is
maintained properly; adjust destruction of the Statement_map to allow
such an assert in THD::~THD
sql/sql_class.h:
- add support for Cursors registry to Statement map.
sql/sql_prepare.cc:
- maintain a list of cursors that must be closed at commit/rollback.
sql/sql_select.cc:
- extend class Cursor to support specific at-COMMIT/ROLLBACK behavior.
If a cursor uses tables of a storage engine that
invalidates all open tables at COMMIT/ROLLBACK, it must be closed
before COMMIT/ROLLBACK is executed.
sql/sql_select.h:
- add an own lock_id and commit/rollback status flag to class Cursor
tests/mysql_client_test.c:
A test case for Bug#10760 and complementary issues: test a simple
deadlock case too.
mysql-test/var:
New BitKeeper file ``mysql-test/var''
2005-07-19 20:21:12 +02:00

ha_archive::ha_archive(TABLE_SHARE *table_arg)
  :handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0)
{
  /* Set our original buffer from pre-allocated memory */
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);

  /* The size of the offset value we will use for position() */
  ref_length = 2 << ((zlibCompileFlags() >> 6) & 3);
  DBUG_ASSERT(ref_length <= sizeof(z_off_t));
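
  /*
    Editorial note (an assumption from zlib's documented flag layout, not
    from this code): bits 6-7 of zlibCompileFlags() encode the compiled
    sizeof(z_off_t) as a two-bit code (0: 16 bit, 1: 32 bit, 2: 64 bit),
    so 2 << code yields that size in bytes, e.g. 2 << 2 == 8 for a 64-bit
    z_off_t. Hence the assert that the computed size does not exceed the
    z_off_t this binary was built with.
  */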
}

/*
  This method reads the header of a datafile and returns whether or not it was successful.
*/
int ha_archive::read_data_header(azio_stream *file_to_read)
{
  uchar data_buffer[DATA_BUFFER_SIZE];
  DBUG_ENTER("ha_archive::read_data_header");

  if (azrewind(file_to_read) == -1)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  if (azread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
    DBUG_RETURN(errno ? errno : -1);

  DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0]));
  DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1]));

  if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
      (data_buffer[1] != (uchar)ARCHIVE_VERSION))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  DBUG_RETURN(0);
}

/*
  This method writes out the header of a datafile and returns whether or not it was successful.
*/
int ha_archive::write_data_header(azio_stream *file_to_write)
{
  uchar data_buffer[DATA_BUFFER_SIZE];
  DBUG_ENTER("ha_archive::write_data_header");

  data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
  data_buffer[1]= (uchar)ARCHIVE_VERSION;

  if (azwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
      DATA_BUFFER_SIZE)
    goto error;
  DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0]));
  DBUG_PRINT("ha_archive::write_data_header", ("Version %u", (uint)data_buffer[1]));

  DBUG_RETURN(0);
error:
  DBUG_RETURN(errno);
}

/*
  This method reads the header of a meta file and returns whether or not it was successful.
  *rows will contain the current number of rows in the data file upon success.
*/
int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  ulonglong check_point;

  DBUG_ENTER("ha_archive::read_meta_file");

  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
    DBUG_RETURN(-1);

  /*
    Parse out the meta data, we ignore version at the moment
  */
  *rows= (ha_rows)uint8korr(meta_buffer + 2);
  check_point= uint8korr(meta_buffer + 10);

  DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
  DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lld", *rows));
  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lld", check_point));
  DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18]));

  if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
      ((bool)meta_buffer[18] == TRUE))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}
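
/*
  Editorial sketch (offsets inferred from the reads and writes in these
  two methods, not from a format specification): the meta file is one
  small fixed-size record of META_BUFFER_SIZE bytes laid out as

    offset  0   1 byte   ARCHIVE_CHECK_HEADER magic
    offset  1   1 byte   ARCHIVE_VERSION
    offset  2   8 bytes  row count   (int8store()/uint8korr())
    offset 10   8 bytes  check point (reserved, currently always 0)
    offset 18   1 byte   dirty flag
*/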

/*
  This method writes out the header of a meta file and returns whether or not it was successful.
  By setting dirty you say whether or not the file represents the actual state of the data file.
  Upon ::open() we set it to dirty, and upon ::close() we set it to clean.
*/
int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  ulonglong check_point= 0; //Reserved for the future

  DBUG_ENTER("ha_archive::write_meta_file");

  meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
  meta_buffer[1]= (uchar)ARCHIVE_VERSION;
  int8store(meta_buffer + 2, (ulonglong)rows);
  int8store(meta_buffer + 10, check_point);
  *(meta_buffer + 18)= (uchar)dirty;
  DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
  DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
  DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));

  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
    DBUG_RETURN(-1);

  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}


/*
  We create the shared memory space that we will use for the open table.
  No matter what, we try to get or create a share; this is so that a
  repair table operation can occur.

  See ha_example.cc for a longer description.
*/
ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
{
  ARCHIVE_SHARE *share;
  char meta_file_name[FN_REFLEN];
  uint length;
  char *tmp_name;

  pthread_mutex_lock(&archive_mutex);
  length=(uint) strlen(table_name);

  if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
                                           (byte*) table_name,
                                           length)))
  {
    if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                         &share, sizeof(*share),
                         &tmp_name, length+1,
                         NullS))
    {
      pthread_mutex_unlock(&archive_mutex);
      return NULL;
    }

    share->use_count= 0;
    share->table_name_length= length;
    share->table_name= tmp_name;
    share->crashed= FALSE;
    fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
    fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
    strmov(share->table_name,table_name);
    /*
      We will use this lock for rows.
    */
    VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
    if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
      share->crashed= TRUE;

    /*
      After we read, we set the file to dirty. When we close, we will do the
      opposite. If the meta file will not open we assume it is crashed and
      leave it up to the user to fix.
    */
    if (read_meta_file(share->meta_file, &share->rows_recorded))
      share->crashed= TRUE;
    else
      (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);

    /*
      It is expensive to open and close the data files and since you can't have
      a gzip file that can be both read and written we keep a writer open
      that is shared among all open tables.
    */
    if (!(azopen(&(share->archive_write), share->data_file_name, O_WRONLY|O_APPEND|O_BINARY)))
    {
      DBUG_PRINT("info", ("Could not open archive write file"));
      share->crashed= TRUE;
    }
    VOID(my_hash_insert(&archive_open_tables, (byte*) share));
    thr_lock_init(&share->lock);
  }
  share->use_count++;
  pthread_mutex_unlock(&archive_mutex);

  return share;
}
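
/*
  Editorial note on the design: get_share()/free_share() keep one
  reference-counted ARCHIVE_SHARE per table name in archive_open_tables,
  so every handler instance of the same table shares the row mutex, the
  meta file handle and the single append-only write stream.
*/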

/*
  Free the share.
  See ha_example.cc for a description.
*/
int ha_archive::free_share(ARCHIVE_SHARE *share)
{
  int rc= 0;
  pthread_mutex_lock(&archive_mutex);
  if (!--share->use_count)
  {
    hash_delete(&archive_open_tables, (byte*) share);
    thr_lock_delete(&share->lock);
    VOID(pthread_mutex_destroy(&share->mutex));
    (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
    if (azclose(&(share->archive_write)))
      rc= 1;
    if (my_close(share->meta_file, MYF(0)))
      rc= 1;
    my_free((gptr) share, MYF(0));
  }
  pthread_mutex_unlock(&archive_mutex);

  return rc;
}


/*
  We just list the extensions of the files we use: the data file and the
  meta file.
*/
static const char *ha_archive_exts[] = {
  ARZ,
  ARM,
  NullS
};

const char **ha_archive::bas_ext() const
{
  return ha_archive_exts;
}


/*
  When opening a file we:
  Create/get our shared structure.
  Init our lock.
  We open the file we will read from.
*/
int ha_archive::open(const char *name, int mode, uint test_if_locked)
{
  DBUG_ENTER("ha_archive::open");

  if (!(share= get_share(name, table)))
    DBUG_RETURN(HA_ERR_OUT_OF_MEM); // Not handled well by calling code!
  thr_lock_data_init(&share->lock,&lock,NULL);

  if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
  {
    if (errno == EROFS || errno == EACCES)
      DBUG_RETURN(my_errno= errno);
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
  }

  DBUG_RETURN(0);
}


/*
  Closes the file.

  SYNOPSIS
    close();

  IMPLEMENTATION:
    We first close this storage engine's file handle to the archive and
    then remove our reference count to the table (and possibly free it
    as well).

  RETURN
    0  ok
    1  Error
*/
int ha_archive::close(void)
{
  int rc= 0;
  DBUG_ENTER("ha_archive::close");

  /* First close stream */
  if (azclose(&archive))
    rc= 1;
  /* then also close share */
  rc|= free_share(share);

  DBUG_RETURN(rc);
}


/*
  We create our data file here. The format is pretty simple.
  You can read about the format of the data file above.
  Unlike other storage engines we do not "pack" our data. Since we
  are about to do a general compression, packing would just be a waste of
  CPU time. If the table has blobs they are written after the row in the order
  of creation.
*/
int ha_archive::create(const char *name, TABLE *table_arg,
                       HA_CREATE_INFO *create_info)
{
  File create_file;  // We use this to create the datafile and the metafile
  char name_buff[FN_REFLEN];
  int error;
  DBUG_ENTER("ha_archive::create");

  if ((create_file= my_create(fn_format(name_buff,name,"",ARM,
                                        MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
                              O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
  {
    error= my_errno;
    goto error;
  }
  write_meta_file(create_file, 0, FALSE);
  my_close(create_file,MYF(0));

  /*
    We reuse name_buff since it is available.
  */
  if ((create_file= my_create(fn_format(name_buff,name,"",ARZ,
                                        MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
                              O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
  {
    error= my_errno;
    goto error;
  }
  if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY))
  {
    error= errno;
    goto error2;
  }
  if (write_data_header(&archive))
  {
    error= errno;
    goto error3;
  }

  if (azclose(&archive))
  {
    error= errno;
    goto error2;
  }

  my_close(create_file, MYF(0));

  DBUG_RETURN(0);

error3:
  /* We already have an error, so ignore results of azclose. */
  (void)azclose(&archive);
error2:
  my_close(create_file, MYF(0));
  delete_table(name);
error:
  /* Return error number, if we got one */
  DBUG_RETURN(error ? error : -1);
}


/*
  This is where the actual row is written out.
*/
int ha_archive::real_write_row(byte *buf, azio_stream *writer)
{
  z_off_t written;
  uint *ptr, *end;
  DBUG_ENTER("ha_archive::real_write_row");

  written= azwrite(writer, buf, table->s->reclength);
  DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", written, table->s->reclength));
  if (!delayed_insert && !bulk_insert)
    share->dirty= TRUE;

  if (written != (z_off_t)table->s->reclength)
    DBUG_RETURN(errno ? errno : -1);
  /*
    We should probably mark the table as damaged if the record is written
    but the blob fails.
  */
  for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
       ptr != end ;
       ptr++)
  {
    char *data_ptr;
    uint32 size= ((Field_blob*) table->field[*ptr])->get_length();

    if (size)
    {
      ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
      written= azwrite(writer, data_ptr, (unsigned)size);
      if (written != (z_off_t)size)
        DBUG_RETURN(errno ? errno : -1);
    }
  }
  DBUG_RETURN(0);
}
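
/*
  Editorial sketch of the on-disk row implied by real_write_row() and
  get_row() (an inference from the code, not a documented format):

    [table->s->reclength bytes: the record buffer as-is]
    [blob 1 data][blob 2 data]...   (non-empty blobs, in creation order)

  There is no per-row length marker or checksum; reads rely on reclength
  plus the blob lengths stored inside the record itself.
*/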


/*
  Look at ha_archive::open() for an explanation of the row format.
  Here we just write out the row.

  Note that start_bulk_insert()/end_bulk_insert() (below) set a flag so
  that bulk loads do not mark the share dirty on every row; see
  real_write_row().
*/
int ha_archive::write_row(byte *buf)
{
  int rc;
  DBUG_ENTER("ha_archive::write_row");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();
  pthread_mutex_lock(&share->mutex);
  share->rows_recorded++;
  rc= real_write_row(buf, &(share->archive_write));
  pthread_mutex_unlock(&share->mutex);

  DBUG_RETURN(rc);
}


/*
  All calls that need to scan the table start with this method. If we are told
  that it is a table scan we rewind the file to the beginning, otherwise
  we assume the position will be set.
*/
int ha_archive::rnd_init(bool scan)
{
  DBUG_ENTER("ha_archive::rnd_init");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /* We rewind the file so that we can read from the beginning if scan */
  if (scan)
  {
    scan_rows= share->rows_recorded;
    records= 0;

    /*
      If dirty, we lock, and then reset/flush the data.
      I found that just calling azflush() doesn't always work.
    */
    if (share->dirty == TRUE)
    {
      pthread_mutex_lock(&share->mutex);
      if (share->dirty == TRUE)
      {
        azflush(&(share->archive_write), Z_SYNC_FLUSH);
        share->dirty= FALSE;
      }
      pthread_mutex_unlock(&share->mutex);
    }

    if (read_data_header(&archive))
      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
  }

  DBUG_RETURN(0);
}
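
/*
  Editorial note: the dirty test in rnd_init() above is a double-checked
  lock; the flag is tested again under share->mutex so that concurrent
  scanners flush the shared write stream at most once.
*/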


/*
  This is the method that is used to read a row. It assumes that the row is
  positioned where you want it.
*/
int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
{
  int read;  // Bytes read, azread() returns int
  uint *ptr, *end;
  char *last;
  size_t total_blob_length= 0;
  DBUG_ENTER("ha_archive::get_row");

  read= azread(file_to_read, buf, table->s->reclength);
  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength));

  if (read == Z_STREAM_ERROR)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /* If we read nothing we are at the end of the file */
  if (read == 0)
    DBUG_RETURN(HA_ERR_END_OF_FILE);

  /*
    If the record is the wrong size, the file is probably damaged, unless
    we are dealing with a delayed insert or a bulk insert.
  */
  if ((ulong) read != table->s->reclength)
    DBUG_RETURN(HA_ERR_END_OF_FILE);

  /* Calculate blob length, we use this for our buffer */
  for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
       ptr != end ;
       ptr++)
    total_blob_length += ((Field_blob*) table->field[*ptr])->get_length();

  /* Adjust our row buffer if we need be */
  buffer.alloc(total_blob_length);
  last= (char *)buffer.ptr();

  /* Loop through our blobs and read them */
  for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
       ptr != end ;
       ptr++)
  {
    size_t size= ((Field_blob*) table->field[*ptr])->get_length();
    if (size)
    {
      read= azread(file_to_read, last, size);
      if ((size_t) read != size)
        DBUG_RETURN(HA_ERR_END_OF_FILE);
      ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
      last += size;
    }
  }
  DBUG_RETURN(0);
}


/*
  Called during ORDER BY. Its position is either from being called sequentially
  or by having had ha_archive::rnd_pos() called before it is called.
*/
int ha_archive::rnd_next(byte *buf)
{
  int rc;
  DBUG_ENTER("ha_archive::rnd_next");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  if (!scan_rows)
    DBUG_RETURN(HA_ERR_END_OF_FILE);
  scan_rows--;

  statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
                      &LOCK_status);
  current_position= aztell(&archive);
  rc= get_row(&archive, buf);

  if (rc != HA_ERR_END_OF_FILE)
    records++;

  DBUG_RETURN(rc);
}


/*
  Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after
  each call to ha_archive::rnd_next() if an ordering of the rows is
  needed.
*/
void ha_archive::position(const byte *record)
{
  DBUG_ENTER("ha_archive::position");
  my_store_ptr(ref, ref_length, current_position);
  DBUG_VOID_RETURN;
}


/*
  This is called after a table scan for each row if the results of the
  scan need to be ordered. It will take *pos and use it to move the
  cursor in the file so that the next row that is called is the
  correctly ordered row.
*/
int ha_archive::rnd_pos(byte * buf, byte *pos)
{
  DBUG_ENTER("ha_archive::rnd_pos");
  statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
                      &LOCK_status);
  current_position= (z_off_t)my_get_ptr(pos, ref_length);
  (void)azseek(&archive, current_position, SEEK_SET);

  DBUG_RETURN(get_row(&archive, buf));
}
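
/*
  Editorial note: position() stores the stream offset reported by
  aztell() into 'ref' (ref_length bytes, sized in the constructor), and
  rnd_pos() azseek()s back to it. A "row id" for ARCHIVE is therefore
  simply an offset into the data stream.
*/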

/*
  This method repairs the meta file. It does this by walking the datafile and
  rewriting the meta file. Currently it does this by calling optimize with
  the extended flag.
*/
int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
  DBUG_ENTER("ha_archive::repair");
  check_opt->flags= T_EXTEND;
  int rc= optimize(thd, check_opt);

  if (rc)
    DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);

  share->crashed= FALSE;
  DBUG_RETURN(0);
}

/*
  The table can become fragmented if data was inserted, read, and then
  inserted again. What we do is open up the file and recompress it completely.
*/
int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
{
  DBUG_ENTER("ha_archive::optimize");
  int rc;
  azio_stream writer;
  char writer_filename[FN_REFLEN];

  /* Flush any waiting data */
  azflush(&(share->archive_write), Z_SYNC_FLUSH);

  /* Let's create a file to contain the new data */
  fn_format(writer_filename, share->table_name, "", ARN,
            MY_REPLACE_EXT|MY_UNPACK_FILENAME);

  if (!(azopen(&writer, writer_filename, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /*
    An extended rebuild is a lot more effort. We open up each row and re-record it.
    Any dead rows are removed (aka rows that may have been partially recorded).
  */

  if (check_opt->flags == T_EXTEND)
  {
    DBUG_PRINT("info", ("archive extended rebuild"));
    byte *buf;

    /*
      First we create a buffer that we can use for reading rows, and can pass
      to get_row().
    */
    if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
    {
      rc= HA_ERR_OUT_OF_MEM;
      goto error;
    }

    /*
      Now we will rewind the archive file so that we are positioned at the
      start of the file.
    */
    rc= read_data_header(&archive);

    /*
      Assuming no error from rewinding the archive file, we now write out the
      new header for our data file.
    */
    if (!rc)
      rc= write_data_header(&writer);

    /*
      On success of writing out the new header, we now fetch each row and
      insert it into the new archive file.
    */
    if (!rc)
    {
      share->rows_recorded= 0;
      while (!(rc= get_row(&archive, buf)))
      {
        real_write_row(buf, &writer);
        share->rows_recorded++;
      }
    }

    my_free((char*)buf, MYF(0));
    if (rc && rc != HA_ERR_END_OF_FILE)
      goto error;
  }
  else
  {
    DBUG_PRINT("info", ("archive quick rebuild"));
    /*
      The quick method is to just read the data raw, and then compress it directly.
    */
    int read;  // Bytes read, azread() returns int
    char block[IO_SIZE];
    if (azrewind(&archive) == -1)
    {
      rc= HA_ERR_CRASHED_ON_USAGE;
      DBUG_PRINT("info", ("archive HA_ERR_CRASHED_ON_USAGE"));
      goto error;
    }

    while ((read= azread(&archive, block, IO_SIZE)))
      azwrite(&writer, block, read);
  }

  azclose(&writer);

  my_rename(writer_filename,share->data_file_name,MYF(0));

  DBUG_RETURN(0);

error:
  azclose(&writer);

  DBUG_RETURN(rc);
}
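
/*
  Editorial note: this is the path OPTIMIZE TABLE takes for an ARCHIVE
  table; REPAIR TABLE reaches it through repair() above, which forces the
  row-by-row T_EXTEND rebuild.
*/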

/*
  Below is an example of how to setup row level locking.
*/
THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
                                       THR_LOCK_DATA **to,
                                       enum thr_lock_type lock_type)
{
  if (lock_type == TL_WRITE_DELAYED)
    delayed_insert= TRUE;
  else
    delayed_insert= FALSE;

  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
  {
    /*
      Here is where we get into the guts of a row level lock.
      If TL_UNLOCK is set
      If we are not doing a LOCK TABLE or DISCARD/IMPORT
      TABLESPACE, then allow multiple writers
    */

    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
         lock_type <= TL_WRITE) && !thd->in_lock_tables
        && !thd->tablespace_op)
      lock_type = TL_WRITE_ALLOW_WRITE;

    /*
      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
      to t2. Convert the lock to a normal read lock to allow
      concurrent inserts to t2.
    */

    if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
      lock_type = TL_READ;

    lock.type=lock_type;
  }

  *to++= &lock;

  return to;
}
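
/*
  Editorial note: store_lock() is also where we learn that a DELAYED
  insert is coming (TL_WRITE_DELAYED); real_write_row() uses the
  delayed_insert flag to avoid marking the share dirty on every row.
*/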


/*
  Hints for optimizer, see ha_tina for more information
*/
void ha_archive::info(uint flag)
{
  DBUG_ENTER("ha_archive::info");
  /*
    This should be an accurate number now, though bulk and delayed inserts can
    cause the number to be inaccurate.
  */
  records= share->rows_recorded;
  deleted= 0;
  /* Costs quite a bit more to get all information */
  if (flag & HA_STATUS_TIME)
  {
    MY_STAT file_stat;  // Stat information for the data file

    VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));

    mean_rec_length= table->s->reclength + buffer.alloced_length();
    data_file_length= file_stat.st_size;
    create_time= file_stat.st_ctime;
    update_time= file_stat.st_mtime;
    max_data_file_length= share->rows_recorded * mean_rec_length;
  }
  delete_length= 0;
  index_file_length=0;

  DBUG_VOID_RETURN;
}


/*
  This method tells us that a bulk insert operation is about to occur. We set
  a flag which will keep write_row from saying that its data is dirty. This in
  turn will keep selects from causing a sync to occur.
  Basically, yet another optimization to keep compression working well.
*/
void ha_archive::start_bulk_insert(ha_rows rows)
{
  DBUG_ENTER("ha_archive::start_bulk_insert");
  bulk_insert= TRUE;
  DBUG_VOID_RETURN;
}


/*
  The other side of start_bulk_insert() is end_bulk_insert(). Here we turn off
  the bulk insert flag, and set the share dirty so that the next select will
  call sync for us.
*/
int ha_archive::end_bulk_insert()
{
  DBUG_ENTER("ha_archive::end_bulk_insert");
  bulk_insert= FALSE;
  share->dirty= TRUE;
  DBUG_RETURN(0);
}

/*
  We cancel a truncate command. The only way to delete an archive table is to drop it.
  This is done for security reasons. In a later version we will enable this by
  allowing the user to select a different row format.
*/
int ha_archive::delete_all_rows()
{
  DBUG_ENTER("ha_archive::delete_all_rows");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}

/*
  We just return state if asked.
*/
bool ha_archive::is_crashed() const
{
  return share->crashed;
}

/*
  Simple scan of the tables to make sure everything is ok.
*/
int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
{
  int rc= 0;
  byte *buf;
  const char *old_proc_info=thd->proc_info;
  ha_rows count= share->rows_recorded;
  DBUG_ENTER("ha_archive::check");

  thd->proc_info= "Checking table";
  /* Flush any waiting data */
  azflush(&(share->archive_write), Z_SYNC_FLUSH);

  /*
    First we create a buffer that we can use for reading rows, and can pass
    to get_row().
  */
  if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
    rc= HA_ERR_OUT_OF_MEM;

  /*
    Now we will rewind the archive file so that we are positioned at the
    start of the file.
  */
  if (!rc)
    read_data_header(&archive);

  if (!rc)
    while (!(rc= get_row(&archive, buf)))
      count--;

  my_free((char*)buf, MYF(0));

  thd->proc_info= old_proc_info;

  if ((rc && rc != HA_ERR_END_OF_FILE) || count)
  {
    share->crashed= TRUE;
    DBUG_RETURN(HA_ADMIN_CORRUPT);
  }
  else
  {
    DBUG_RETURN(HA_ADMIN_OK);
  }
}

/*
  Check and repair the table if needed.
*/
bool ha_archive::check_and_repair(THD *thd)
{
  HA_CHECK_OPT check_opt;
  DBUG_ENTER("ha_archive::check_and_repair");

  check_opt.init();

  if (check(thd, &check_opt) == HA_ADMIN_CORRUPT)
  {
    DBUG_RETURN(repair(thd, &check_opt));
  }
  else
  {
    DBUG_RETURN(HA_ADMIN_OK);
  }
}