/* Copyright (C) 2003 MySQL AB

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

/*
  Make sure to look at ha_tina.h for more details.

  First off, this is a play thing for me, there are a number of things
  wrong with it:
    *) It was designed for csv and therefore its performance is highly
       questionable.
    *) Indexes have not been implemented. This is because the files can
       be traded in and out of the table directory without having to worry
       about rebuilding anything.
    *) NULLs and "" are treated equally (like a spreadsheet).
    *) There was in the beginning no point to anyone seeing this other
       than me, so there is a good chance that I haven't quite documented
       it well.
    *) Less design, more "make it work"

  Now there are a few cool things with it:
    *) Errors can result in corrupted data files.
    *) Data files can be read by spreadsheets directly.

  TODO:
   *) Move to a block system for larger files
   *) Error recovery, it's all there, just need to finish it
   *) Document how the chains work.

   -Brian
*/

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation        // gcc: Class implementation
#endif

#include "mysql_priv.h"
#include "ha_tina.h"

#include <mysql/plugin.h>

/*
  uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
*/
#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
  + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
#define TINA_CHECK_HEADER 254 // The number we use to determine corruption

/* The file extension */
#define CSV_EXT ".CSV"               // The data file
#define CSN_EXT ".CSN"               // Files used during repair
#define CSM_EXT ".CSM"               // Meta file
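
/*
  On-disk layout of the .CSM meta file, as read and written by
  read_meta_file()/write_meta_file() below (META_BUFFER_SIZE bytes,
  35 with an 8-byte ulonglong):

    offset  0  uchar      TINA_CHECK_HEADER magic (254)
    offset  1  uchar      TINA_VERSION
    offset  2  ulonglong  number of rows in the data file
    offset 10  ulonglong  check_point      (reserved, not used yet)
    offset 18  ulonglong  auto_increment   (reserved, not used yet)
    offset 26  ulonglong  forced_flushes   (reserved, not used yet)
    offset 34  uchar      dirty/crashed flag
*/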

static TINA_SHARE *get_share(const char *table_name, TABLE *table);
static int free_share(TINA_SHARE *share);
static int read_meta_file(File meta_file, ha_rows *rows);
static int write_meta_file(File meta_file, ha_rows rows, bool dirty);

/* Stuff for shares */
pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
static int tina_init= 0;

static handler *tina_create_handler(TABLE_SHARE *table);
static int tina_init_func();

static const char tina_hton_name[]= "CSV";
static const char tina_hton_comment[]= "CSV storage engine";

handlerton tina_hton= {
  MYSQL_HANDLERTON_INTERFACE_VERSION,
  tina_hton_name,
  SHOW_OPTION_YES,
  tina_hton_comment,
  DB_TYPE_CSV_DB,
  (bool (*)()) tina_init_func,
  0,                      /* slot */
  0,                      /* savepoint size. */
  NULL,                   /* close_connection */
  NULL,                   /* savepoint */
  NULL,                   /* rollback to savepoint */
  NULL,                   /* release savepoint */
  NULL,                   /* commit */
  NULL,                   /* rollback */
  NULL,                   /* prepare */
  NULL,                   /* recover */
  NULL,                   /* commit_by_xid */
  NULL,                   /* rollback_by_xid */
  NULL,                   /* create_cursor_read_view */
  NULL,                   /* set_cursor_read_view */
  NULL,                   /* close_cursor_read_view */
  tina_create_handler,    /* Create a new handler */
  NULL,                   /* Drop a database */
  tina_end,               /* Panic call */
  NULL,                   /* Start Consistent Snapshot */
  NULL,                   /* Flush logs */
  NULL,                   /* Show status */
  NULL,                   /* Partition flags */
  NULL,                   /* Alter table flags */
  NULL,                   /* Alter Tablespace */
  NULL,                   /* Fill FILES Table */
  HTON_CAN_RECREATE,
  NULL,                   /* binlog_func */
  NULL                    /* binlog_log_query */
};

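/*
  The NULL entries above are hooks (transactions, savepoints, XA, cursors,
  binlog callbacks) that the CSV engine does not implement; the server is
  expected to skip hooks that are left as NULL.
*/
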
/*****************************************************************************
** TINA tables
*****************************************************************************/

/*
  Used for sorting chains with qsort().
*/
int sort_set (tina_set *a, tina_set *b)
{
  /*
    We assume that intervals do not intersect. So, it is enough to compare
    any two points. Here we take start of intervals for comparison.
  */
  return ( a->begin > b->begin ? -1 : ( a->begin < b->begin ? 1 : 0 ) );
}
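
/*
  Note: with qsort() this orders tina_set intervals by descending begin
  offset. Presumably that lets the delete chains be processed from the end
  of the file backwards, so earlier offsets stay valid while rows are
  being removed.
*/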

static byte* tina_get_key(TINA_SHARE *share, uint *length,
                          my_bool not_used __attribute__((unused)))
{
  *length=share->table_name_length;
  return (byte*) share->table_name;
}

/*
  Reloads the mmap file.
*/
int get_mmap(TINA_SHARE *share, int write)
{
  DBUG_ENTER("ha_tina::get_mmap");
  if (share->mapped_file && my_munmap(share->mapped_file,
                                      share->file_stat.st_size))
    DBUG_RETURN(1);

  if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1)
    DBUG_RETURN(1);

  if (share->file_stat.st_size)
  {
    if (write)
      share->mapped_file= (byte *)my_mmap(NULL, share->file_stat.st_size,
                                          PROT_READ|PROT_WRITE, MAP_SHARED,
                                          share->data_file, 0);
    else
      share->mapped_file= (byte *)my_mmap(NULL, share->file_stat.st_size,
                                          PROT_READ, MAP_PRIVATE,
                                          share->data_file, 0);
    if (share->mapped_file == MAP_FAILED)
    {
      /*
        Bad idea you think? See, the problem is that nothing actually checks
        the return value of ::rnd_init(), so raising an error here is about
        all we can do.
        Never going to happen right? :)
      */
      my_message(errno, "Woops, blew up opening a mapped file", 0);
      DBUG_ASSERT(0);
      DBUG_RETURN(1);
    }
  }
  else
    share->mapped_file= NULL;

  DBUG_RETURN(0);
}
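
/*
  Note on the mapping modes above: with 'write' set the file is mapped
  PROT_READ|PROT_WRITE and MAP_SHARED so that updates reach the data file;
  otherwise a read-only MAP_PRIVATE mapping is used. The mapping length is
  fixed at st_size, so get_mmap() has to be called again whenever the data
  file grows or shrinks.
*/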

static int tina_init_func()
{
  if (!tina_init)
  {
    tina_init++;
    VOID(pthread_mutex_init(&tina_mutex,MY_MUTEX_INIT_FAST));
    (void) hash_init(&tina_open_tables,system_charset_info,32,0,0,
                     (hash_get_key) tina_get_key,0,0);
  }
  return 0;
}

static int tina_done_func()
{
  if (tina_init)
  {
    if (tina_open_tables.records)
    {
      return 1;
    }
    hash_free(&tina_open_tables);
    pthread_mutex_destroy(&tina_mutex);
    tina_init--;
  }
  return 0;
}

/*
  Simple lock controls.
*/
static TINA_SHARE *get_share(const char *table_name, TABLE *table)
{
  TINA_SHARE *share;
  char meta_file_name[FN_REFLEN];
  char *tmp_name;
  uint length;

  if (!tina_init)
    tina_init_func();

  pthread_mutex_lock(&tina_mutex);
  length=(uint) strlen(table_name);

  /*
    If share is not present in the hash, create a new share and
    initialize its members.
  */
  if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
                                        (byte*) table_name,
                                        length)))
  {
    if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                         &share, sizeof(*share),
                         &tmp_name, length+1,
                         NullS))
    {
      pthread_mutex_unlock(&tina_mutex);
      return NULL;
    }

    share->use_count= 0;
    share->is_log_table= FALSE;
    share->table_name_length= length;
    share->table_name= tmp_name;
    share->crashed= FALSE;
    share->rows_recorded= 0;
    strmov(share->table_name, table_name);
    fn_format(share->data_file_name, table_name, "", CSV_EXT,
              MY_REPLACE_EXT|MY_UNPACK_FILENAME);
    fn_format(meta_file_name, table_name, "", CSM_EXT,
              MY_REPLACE_EXT|MY_UNPACK_FILENAME);
    if (my_hash_insert(&tina_open_tables, (byte*) share))
      goto error;
    thr_lock_init(&share->lock);
    pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);

    /*
      Open or create the meta file. In the latter case, we'll get
      an error during read_meta_file and mark the table as crashed.
      Usually this will result in auto-repair, and we will get a good
      meta-file in the end.
    */
    if ((share->meta_file= my_open(meta_file_name,
                                   O_RDWR|O_CREAT, MYF(0))) == -1)
      share->crashed= TRUE;

    /*
      After we read, we set the file to dirty. When we close, we will do the
      opposite. If the meta file does not open, we assume the table is
      crashed and mark it as such.
    */
    if (read_meta_file(share->meta_file, &share->rows_recorded))
      share->crashed= TRUE;
    else
      (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);

    if ((share->data_file= my_open(share->data_file_name, O_RDWR|O_APPEND,
                                   MYF(0))) == -1)
      goto error2;

    share->mapped_file= NULL; // We don't know the state as we just allocated it
    if (get_mmap(share, 0) > 0)
      goto error3;

    /* init file length value used by readers */
    share->saved_data_file_length= share->file_stat.st_size;
  }
  share->use_count++;
  pthread_mutex_unlock(&tina_mutex);

  return share;

error3:
  my_close(share->data_file,MYF(0));
error2:
  thr_lock_delete(&share->lock);
  pthread_mutex_destroy(&share->mutex);
error:
  pthread_mutex_unlock(&tina_mutex);
  my_free((gptr) share, MYF(0));

  return NULL;
}
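
/*
  Share lifecycle sketch: one TINA_SHARE per open table, reference-counted
  under tina_mutex so concurrent handler instances reuse the same mapping
  and meta file. get_share() is paired with free_share(); presumably the
  callers are ha_tina::open() and ha_tina::close() elsewhere in this file.
*/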

/*
  Read CSV meta-file

  SYNOPSIS
    read_meta_file()
    meta_file   The meta-file filedes
    ha_rows     Pointer to the var we use to store rows count.
                These are read from the meta-file.

  DESCRIPTION

    Read the meta-file info. For now we are only interested in
    rows count, crashed bit and magic number.

  RETURN
    0 - OK
    non-zero - error occurred
*/

static int read_meta_file(File meta_file, ha_rows *rows)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  uchar *ptr= meta_buffer;

  DBUG_ENTER("ha_tina::read_meta_file");

  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0)
      != META_BUFFER_SIZE)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /*
    Parse out the meta data, we ignore version at the moment
  */

  ptr+= sizeof(uchar)*2; // Move past header
  *rows= (ha_rows)uint8korr(ptr);
  ptr+= sizeof(ulonglong); // Move past rows
  /*
    Move past check_point, auto_increment and forced_flushes fields.
    They are present in the format, but we do not use them yet.
  */
  ptr+= 3*sizeof(ulonglong);

  /* check crashed bit and magic number */
  if ((meta_buffer[0] != (uchar)TINA_CHECK_HEADER) ||
      ((bool)(*ptr) == TRUE))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}

/*
  Write CSV meta-file

  SYNOPSIS
    write_meta_file()
    meta_file   The meta-file filedes
    ha_rows     The number of rows we have in the datafile.
    dirty       A flag, which marks whether we have a corrupt table

  DESCRIPTION

    Write meta-info to the file. Only rows count, crashed bit and
    magic number matter now.

  RETURN
    0 - OK
    non-zero - error occurred
*/

static int write_meta_file(File meta_file, ha_rows rows, bool dirty)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  uchar *ptr= meta_buffer;

  DBUG_ENTER("ha_tina::write_meta_file");

  *ptr= (uchar)TINA_CHECK_HEADER;
  ptr+= sizeof(uchar);
  *ptr= (uchar)TINA_VERSION;
  ptr+= sizeof(uchar);
  int8store(ptr, (ulonglong)rows);
  ptr+= sizeof(ulonglong);
  memset(ptr, 0, 3*sizeof(ulonglong));
  /*
    Skip over checkpoint, autoincrement and forced_flushes fields.
    We'll need them later.
  */
  ptr+= 3*sizeof(ulonglong);
  *ptr= (uchar)dirty;

  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0)
      != META_BUFFER_SIZE)
    DBUG_RETURN(-1);

  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}
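
/*
  Meta-file lifecycle sketch, pieced together from get_share()/free_share()
  in this file (illustrative, not a separate code path):

    meta_file= my_open(meta_file_name, O_RDWR|O_CREAT, MYF(0));
    read_meta_file(meta_file, &rows);          // verify magic, reject if dirty
    write_meta_file(meta_file, rows, TRUE);    // mark dirty while the table is open
    ...
    write_meta_file(meta_file, rows, FALSE);   // mark clean again on last close
    my_close(meta_file, MYF(0));

  A dirty flag found by read_meta_file() makes the share look crashed, which
  normally leads to auto-repair (see check_and_repair() below and the
  comment in get_share()).
*/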

bool ha_tina::check_and_repair(THD *thd)
{
  HA_CHECK_OPT check_opt;
  DBUG_ENTER("ha_tina::check_and_repair");

  check_opt.init();

  DBUG_RETURN(repair(thd, &check_opt));
}

bool ha_tina::is_crashed() const
{
  DBUG_ENTER("ha_tina::is_crashed");
  DBUG_RETURN(share->crashed);
}

/*
  Free lock controls.
*/
static int free_share(TINA_SHARE *share)
{
  DBUG_ENTER("ha_tina::free_share");
  pthread_mutex_lock(&tina_mutex);
  int result_code= 0;
  if (!--share->use_count)
  {
    /* Write the meta file. Mark it as crashed if needed. */
    (void)write_meta_file(share->meta_file, share->rows_recorded,
                          share->crashed ? TRUE : FALSE);
    if (my_close(share->meta_file, MYF(0)))
      result_code= 1;
    if (share->mapped_file)
      my_munmap(share->mapped_file, share->file_stat.st_size);
    share->mapped_file= NULL;
    result_code= my_close(share->data_file,MYF(0));
    hash_delete(&tina_open_tables, (byte*) share);
    thr_lock_delete(&share->lock);
    pthread_mutex_destroy(&share->mutex);
    my_free((gptr) share, MYF(0));
  }
  pthread_mutex_unlock(&tina_mutex);

  DBUG_RETURN(result_code);
}

int tina_end(ha_panic_function type)
{
  return tina_done_func();
}

/*
  Finds the end of a line.
  Currently only supports files written on a UNIX OS.
*/
byte * find_eoln(byte *data, off_t begin, off_t end)
{
  for (off_t x= begin; x < end; x++)
    if (data[x] == '\n')
      return data + x;

  return 0;
}
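
/*
  Note: only a bare '\n' is recognized as a line terminator, which is why
  the comment above restricts this to files written on UNIX; a CRLF file
  would presumably leave the '\r' attached to the last field of each row.
*/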
|
|
|
|
|
A fix and a test case for Bug#10760 and complementary cleanups.
The idea of the patch
is that every cursor gets its own lock id for table level locking.
Thus cursors are protected from updates performed within the same
connection. Additionally a list of transient (must be closed at
commit) cursors is maintained and all transient cursors are closed
when necessary. Lastly, this patch adds support for deadlock
timeouts to TLL locking when using cursors.
+ post-review fixes.
include/thr_lock.h:
- add a notion of lock owner to table level locking. When using
cursors, lock owner can not be identified by a thread id any more,
as we must protect cursors from updates issued within the same
connection (thread). So, each cursor has its own lock identifier to
use with table level locking.
- extend return values of thr_lock and thr_multi_lock with
THR_LOCK_TIMEOUT and THR_LOCK_DEADLOCK, since these conditions
are now possible (see comments to thr_lock.c)
mysys/thr_lock.c:
Better support for cursors:
- use THR_LOCK_OWNER * as lock identifier, not pthread_t.
- check and return an error for a trivial deadlock case, when an
update statement is issued to a table locked by a cursor which has
been previously opened in the same connection.
- add support for locking timeouts: with use of cursors, trivial
deadlocks can occur. For now the only remedy is the lock wait timeout,
which is initialized from a new global variable 'table_lock_wait_timeout'
Example of a deadlock (assuming the storage engine does not downgrade
locks):
con1: open cursor for select * from t1;
con2: open cursor for select * from t2;
con1: update t2 set id=id*2; -- blocked
con2: update t1 set id=id*2; -- deadlock
Lock timeouts are active only if a connection is using cursors.
- the check in the wait_for_lock loop has been changed from
data->cond != cond to data->cond != 0. data->cond is zeroed
in every place it's changed.
- added comments
sql/examples/ha_archive.cc:
- extend the handlerton with the info about cursor behaviour at commit.
sql/examples/ha_archive.h:
- ctor moved to .cc to make use of archive handlerton
sql/examples/ha_example.cc:
- add handlerton instance, init handler::ht with it
sql/examples/ha_example.h:
- ctor moved to .cc to make use of ha_example handlerton
sql/examples/ha_tina.cc:
- add handlerton instance, init handler::ht with it
sql/examples/ha_tina.h:
- ctor moved to .cc to make use of CSV handlerton
sql/ha_berkeley.cc:
- init handlerton::flags and handler::ht
sql/ha_berkeley.h:
- ctor moved to .cc to make use of BerkeleyDB handlerton
sql/ha_blackhole.cc:
- add handlerton instance, init handler::ht with it
sql/ha_blackhole.h:
- ctor moved to .cc to make use of blackhole handlerton
sql/ha_federated.cc:
- add handlerton instance, init handler::ht with it
sql/ha_federated.h:
- ctor moved to .cc to make use of federated handlerton
sql/ha_heap.cc:
- add handlerton instance, init handler::ht with it
sql/ha_heap.h:
- ctor moved to .cc to make use of ha_heap handlerton
sql/ha_innodb.cc:
- init handlerton::flags and handler::ht of innobase storage engine
sql/ha_innodb.h:
- ctor moved to .cc to make use of archive handlerton
sql/ha_myisam.cc:
- add handlerton instance, init handler::ht with it
sql/ha_myisam.h:
- ctor moved to .cc to make use of MyISAM handlerton
sql/ha_myisammrg.cc:
- init handler::ht in the ctor
sql/ha_myisammrg.h:
- ctor moved to .cc to make use of MyISAM MERGE handlerton
sql/ha_ndbcluster.cc:
- init handlerton::flags and handler::ht
sql/handler.cc:
- drop support for ISAM storage engine, which was removed from 5.0
- close all "transient" cursors at COMMIT/ROLLBACK. A "transient"
SQL level cursor is a cursor that uses tables that have a transaction-
specific state.
sql/handler.h:
- extend struct handlerton with flags, add handlerton *ht to every
handler instance.
sql/lock.cc:
- extend mysql_lock_tables to send error to the client if
thr_multi_lock returns a timeout or a deadlock error.
sql/mysqld.cc:
- add server option --table_lock_wait_timeout (in seconds)
sql/set_var.cc:
- add new global variable 'table_lock_wait_timeout' to specify
a wait timeout for table-level locks of MySQL (in seconds). The default
timeout is 50 seconds. The timeout is active only if the connection
has open cursors.
sql/sql_class.cc:
- implement Statement_map::close_transient_cursors
- safety suggests that we need an assert ensuring
llock_info->n_cursors is functioning properly, adjust destruction of
the Statement_map to allow such assert in THD::~THD
sql/sql_class.h:
- add support for Cursors registry to Statement map.
sql/sql_prepare.cc:
- maintain a list of cursors that must be closed at commit/rollback.
sql/sql_select.cc:
- extend class Cursor to support specific at-COMMIT/ROLLBACK behavior.
If a cursor uses tables of a storage engine that
invalidates all open tables at COMMIT/ROLLBACK, it must be closed
before COMMIT/ROLLBACK is executed.
sql/sql_select.h:
- add an own lock_id and commit/rollback status flag to class Cursor
tests/mysql_client_test.c:
A test case for Bug#10760 and complementary issues: test a simple
deadlock case too.
mysql-test/var:
New BitKeeper file ``mysql-test/var''
2005-07-19 22:21:12 +04:00
|
|
|
|
Table definition cache, part 2
The table opening process now works the following way:
- Create common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
object and open a handler to the table object
Other noteworthy changes:
- In TABLE_SHARE the most common strings are now LEX_STRING's
- Better error message when table is not found
- Variable table_cache is now renamed 'table_open_cache'
- New variable 'table_definition_cache' that is the number of table defintions that will be cached
- strxnmov() calls are now fixed to avoid overflows
- strxnmov() will now always add one end \0 to result
- engine objects are now created with a TABLE_SHARE object instead of a TABLE object.
- After creating a field object one must call field->init(table) before using it
- For a busy system this change will give you:
- Less memory usage for table object
- Faster opening of tables (if it's has been in use or is in table definition cache)
- Allow you to cache many table definitions objects
- Faster drop of table
mysql-test/mysql-test-run.sh:
Fixed some problems with --gdb option
Test both with socket and tcp/ip port that all old servers are killed
mysql-test/r/flush_table.result:
More tests with lock table with 2 threads + flush table
mysql-test/r/information_schema.result:
Removed old (now wrong) result
mysql-test/r/innodb.result:
Better error messages (thanks to TDC patch)
mysql-test/r/merge.result:
Extra flush table test
mysql-test/r/ndb_bitfield.result:
Better error messages (thanks to TDC patch)
mysql-test/r/ndb_partition_error.result:
Better error messages (thanks to TDC patch)
mysql-test/r/query_cache.result:
Remove tables left from old tests
mysql-test/r/temp_table.result:
Test truncate with temporary tables
mysql-test/r/variables.result:
Table_cache -> Table_open_cache
mysql-test/t/flush_table.test:
More tests with lock table with 2 threads + flush table
mysql-test/t/merge.test:
Extra flush table test
mysql-test/t/multi_update.test:
Added 'sleep' to make test predictable
mysql-test/t/query_cache.test:
Remove tables left from old tests
mysql-test/t/temp_table.test:
Test truncate with temporary tables
mysql-test/t/variables.test:
Table_cache -> Table_open_cache
mysql-test/valgrind.supp:
Remove warning that may happens becasue threads dies in different order
mysys/hash.c:
Fixed wrong DBUG_PRINT
mysys/mf_dirname.c:
More DBUG
mysys/mf_pack.c:
Better comment
mysys/mf_tempdir.c:
More DBUG
Ensure that we call cleanup_dirname() on all temporary directory paths.
If we don't do this, we will get a failure when comparing temporary table
names as in some cases the temporary table name is run through convert_dirname())
mysys/my_alloc.c:
Indentation fix
sql/examples/ha_example.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_example.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.cc:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.h:
We new use TABLE_SHARE instead of TABLE when creating engine handlers
sql/field.cc:
Update for table definition cache:
- Field creation now takes TABLE_SHARE instead of TABLE as argument
(This is becasue field definitions are now cached in TABLE_SHARE)
When a field is created, one now must call field->init(TABLE) before using it
- Use s->db instead of s->table_cache_key
- Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
- make_field() takes TABLE_SHARE as argument instead of TABLE
- move_field() -> move_field_offset()
sql/field.h:
Update for table definition cache:
- Field creation now takes TABLE_SHARE instead of TABLE as argument
(This is because field definitions are now cached in TABLE_SHARE)
When a field is created, one now must call field->init(TABLE) before using it
- Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
- make_field() takes TABLE_SHARE as argument instead of TABLE
- move_field() -> move_field_offset()
sql/ha_archive.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_archive.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_berkeley.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Changed name of argument to create() to not hide internal 'table' variable.
table->s -> table_share
sql/ha_berkeley.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_federated.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Fixed comments
Remove index variable and replace with pointers (simple optimization)
move_field() -> move_field_offset()
Removed some strlen() calls
sql/ha_federated.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_heap.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Simplify delete_table() and create() as the given file names are now without extension
sql/ha_heap.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisam.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Remove unneeded fn_format()
Fixed for new table->s structure
sql/ha_myisam.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisammrg.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Don't set 'is_view' for MERGE tables
Use new interface to find_temporary_table()
sql/ha_myisammrg.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Added flag HA_NO_COPY_ON_ALTER
sql/ha_ndbcluster.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Fixed wrong calls to strxnmov()
Give error HA_ERR_TABLE_DEF_CHANGED if table definition has changed
drop_table -> intern_drop_table()
table->s -> table_share
Move part_info to TABLE
Fixed comments & DBUG prints
New arguments to print_error()
sql/ha_ndbcluster.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_partition.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
We can't set up or use part_info when creating handler as there is not yet any table object
New ha_initialise() to work with TDC (Done by Mikael)
sql/ha_partition.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Got set_part_info() from Mikael
sql/handler.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
ha_delete_table() now also takes database as an argument
handler::ha_open() now takes TABLE as argument
ha_open() now calls ha_allocate_read_write_set()
Simplify ha_allocate_read_write_set()
Remove ha_deallocate_read_write_set()
Use table_share (Cached by table definition cache)
sql/handler.h:
New table flag: HA_NO_COPY_ON_ALTER (used by merge tables)
Remove ha_deallocate_read_write_set()
get_new_handler() now takes TABLE_SHARE as argument
ha_delete_table() now gets database as argument
sql/item.cc:
table_name and db are now LEX_STRING objects
When creating fields, we now have to call field->init(table)
move_field -> move_field_offset()
sql/item.h:
tmp_table_field_from_field_type() now takes an extra parameter 'fixed_length' to allow one to force usage of CHAR
instead of BLOB
sql/item_cmpfunc.cc:
Fixed call to tmp_table_field_from_field_type()
sql/item_create.cc:
Assert on new, not yet handled cast types
sql/item_func.cc:
When creating fields, we now have to call field->init(table)
dummy_table used by 'sp' now needs a TABLE_SHARE object
sql/item_subselect.cc:
Trivial code cleanups
sql/item_sum.cc:
When creating fields, we now have to call field->init(table)
sql/item_timefunc.cc:
Item_func_str_to_date::tmp_table_field() now replaced by call to
tmp_table_field_from_field_type() (see item_timefunc.h)
sql/item_timefunc.h:
Simplify tmp_table_field()
sql/item_uniq.cc:
When creating fields, we now have to call field->init(table)
sql/key.cc:
Added 'KEY' argument to 'find_ref_key' to simplify code
sql/lock.cc:
More debugging
Use create_table_def_key() to create key for table cache
Allocate TABLE_SHARE properly when creating name lock
Fix that locked_table_name doesn't test same table twice
sql/mysql_priv.h:
New functions for table definition cache
New interfaces to a lot of functions.
New faster interface to find_temporary_table() and close_temporary_table()
sql/mysqld.cc:
Added support for table definition cache of size 'table_def_size'
Fixed some calls to strnmov()
Changed name of 'table_cache' to 'table_open_cache'
sql/opt_range.cc:
Use new interfaces
Fixed warnings from valgrind
sql/parse_file.cc:
Safer calls to strxnmov()
Fixed typo
sql/set_var.cc:
Added variable 'table_definition_cache'
Variable table_cache renamed to 'table_open_cache'
sql/slave.cc:
Use new interface
sql/sp.cc:
Proper use of TABLE_SHARE
sql/sp_head.cc:
Remove compiler warnings
We now have to call field->init(table)
sql/sp_head.h:
Pointers to parsed strings are now const
sql/sql_acl.cc:
table_name is now a LEX_STRING
sql/sql_base.cc:
Main implementation of table definition cache
(The #ifdef's are there for the future when table definition cache will replace open table cache)
Now table definitions are cached independently of open tables, which will speed things up when a table is in use from several places at once
Views are not yet cached; for the moment we only cache whether a table is a view or not.
Faster implementation of find_temporary_table()
Replace 'wait_for_refresh()' with the more general function 'wait_for_condition()'
Drop table is slightly faster as we can use the table definition cache to know the type of the table
sql/sql_cache.cc:
table_cache_key and table_name are now LEX_STRING
DBUG print fixes
sql/sql_class.cc:
table_cache_key is now a LEX_STRING
safer strxnmov()
sql/sql_class.h:
Added number of open table shares (table definitions)
sql/sql_db.cc:
safer strxnmov()
sql/sql_delete.cc:
Use new interface to find_temporary_table()
sql/sql_derived.cc:
table_name is now a LEX_STRING
sql/sql_handler.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_insert.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_lex.cc:
Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_lex.h:
Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_load.cc:
Safer strxnmov()
sql/sql_parse.cc:
Better error if wrong DB name
sql/sql_partition.cc:
part_info moved to TABLE from TABLE_SHARE
Indentation changes
sql/sql_select.cc:
Indentation fixes
Call field->init(TABLE) for newly created fields
Update create_tmp_table() to use TABLE_SHARE properly
sql/sql_select.h:
Call field->init(TABLE) for newly created fields
sql/sql_show.cc:
table_name is now a LEX_STRING
part_info moved to TABLE
sql/sql_table.cc:
Use table definition cache to speed up delete of tables
Fixed calls to functions with new interfaces
Don't use 'share_not_to_be_used'
Instead of doing openfrm() when doing repair, we now have to call
get_table_share() followed by open_table_from_share().
Replace some fn_format() with faster unpack_filename().
Safer strxnmov()
part_info is now in TABLE
Added Mikaels patch for partition and ALTER TABLE
Instead of using 'TABLE_SHARE->is_view' use 'table_flags() & HA_NO_COPY_ON_ALTER'
sql/sql_test.cc:
table_name and table_cache_key are now LEX_STRING's
sql/sql_trigger.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
safer strxnmov()
Removed compiler warnings
sql/sql_update.cc:
Call field->init(TABLE) after field is created
sql/sql_view.cc:
safer strxnmov()
Create common TABLE_SHARE object for views to allow us to cache if table is a view
sql/structs.h:
Added SHOW_TABLE_DEFINITIONS
sql/table.cc:
Creation and destruction of TABLE_SHARE objects that are common to many TABLE objects
The table opening process now works the following way:
- Create common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
object and open a handler to the table object
open_table_def() is written in such a way that it should be trivial to add parsing of the .frm files in new formats
sql/table.h:
TABLE objects for the same database table now share a common TABLE_SHARE object
In TABLE_SHARE the most common strings are now LEX_STRING's
sql/unireg.cc:
Changed arguments to rea_create_table() to have same order as other functions
Call field->init(table) for newly created fields
sql/unireg.h:
Added OPEN_VIEW
strings/strxnmov.c:
Change strxnmov() to always add end \0
This makes usage of strxnmov() safer, as most of the MySQL code assumes that strxnmov() will create a null-terminated string
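A minimal usage sketch of the safer contract described in the entry above (editor's illustration; 'db' and 'name' stand in for whatever strings are being concatenated): at most the given number of bytes is copied from the source strings, and a terminating '\0' is now always appended, so the destination must reserve one extra byte for it.

  char path[FN_REFLEN];
  /* Copies at most sizeof(path) - 1 bytes and always 0-terminates */
  strxnmov(path, sizeof(path) - 1, mysql_data_home, "/", db, "/", name, NullS);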
2005-11-23 22:45:02 +02:00
|
|
|
static handler *tina_create_handler(TABLE_SHARE *table)
|
2005-11-07 16:25:06 +01:00
|
|
|
{
|
|
|
|
return new ha_tina(table);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
Table definition cache, part 2
2005-11-23 22:45:02 +02:00
|
|
|
ha_tina::ha_tina(TABLE_SHARE *table_arg)
|
A fix and a test case for Bug#10760 and complementary cleanups.
The idea of the patch
is that every cursor gets its own lock id for table level locking.
Thus cursors are protected from updates performed within the same
connection. Additionally a list of transient (must be closed at
commit) cursors is maintained and all transient cursors are closed
when necessary. Lastly, this patch adds support for deadlock
timeouts to TLL locking when using cursors.
+ post-review fixes.
include/thr_lock.h:
- add a notion of lock owner to table level locking. When using
cursors, lock owner can not be identified by a thread id any more,
as we must protect cursors from updates issued within the same
connection (thread). So, each cursor has its own lock identifier to
use with table level locking.
- extend return values of thr_lock and thr_multi_lock with
THR_LOCK_TIMEOUT and THR_LOCK_DEADLOCK, since these conditions
are now possible (see comments to thr_lock.c)
mysys/thr_lock.c:
Better support for cursors:
- use THR_LOCK_OWNER * as lock identifier, not pthread_t.
- check and return an error for a trivial deadlock case, when an
update statement is issued to a table locked by a cursor which has
been previously opened in the same connection.
- add support for locking timeouts: with use of cursors, trivial
deadlocks can occur. For now the only remedy is the lock wait timeout,
which is initialized from a new global variable 'table_lock_wait_timeout'
Example of a deadlock (assuming the storage engine does not downgrade
locks):
con1: open cursor for select * from t1;
con2: open cursor for select * from t2;
con1: update t2 set id=id*2; -- blocked
con2: update t1 set id=id*2; -- deadlock
Lock timeouts are active only if a connection is using cursors.
- the check in the wait_for_lock loop has been changed from
data->cond != cond to data->cond != 0. data->cond is zeroed
in every place it's changed.
- added comments
sql/examples/ha_archive.cc:
- extend the handlerton with the info about cursor behaviour at commit.
sql/examples/ha_archive.h:
- ctor moved to .cc to make use of archive handlerton
sql/examples/ha_example.cc:
- add handlerton instance, init handler::ht with it
sql/examples/ha_example.h:
- ctor moved to .cc to make use of ha_example handlerton
sql/examples/ha_tina.cc:
- add handlerton instance, init handler::ht with it
sql/examples/ha_tina.h:
- ctor moved to .cc to make use of CSV handlerton
sql/ha_berkeley.cc:
- init handlerton::flags and handler::ht
sql/ha_berkeley.h:
- ctor moved to .cc to make use of BerkeleyDB handlerton
sql/ha_blackhole.cc:
- add handlerton instance, init handler::ht with it
sql/ha_blackhole.h:
- ctor moved to .cc to make use of blackhole handlerton
sql/ha_federated.cc:
- add handlerton instance, init handler::ht with it
sql/ha_federated.h:
- ctor moved to .cc to make use of federated handlerton
sql/ha_heap.cc:
- add handlerton instance, init handler::ht with it
sql/ha_heap.h:
- ctor moved to .cc to make use of ha_heap handlerton
sql/ha_innodb.cc:
- init handlerton::flags and handler::ht of innobase storage engine
sql/ha_innodb.h:
- ctor moved to .cc to make use of archive handlerton
sql/ha_myisam.cc:
- add handlerton instance, init handler::ht with it
sql/ha_myisam.h:
- ctor moved to .cc to make use of MyISAM handlerton
sql/ha_myisammrg.cc:
- init handler::ht in the ctor
sql/ha_myisammrg.h:
- ctor moved to .cc to make use of MyISAM MERGE handlerton
sql/ha_ndbcluster.cc:
- init handlerton::flags and handler::ht
sql/handler.cc:
- drop support for ISAM storage engine, which was removed from 5.0
- close all "transient" cursors at COMMIT/ROLLBACK. A "transient"
SQL level cursor is a cursor that uses tables that have a transaction-
specific state.
sql/handler.h:
- extend struct handlerton with flags, add handlerton *ht to every
handler instance.
sql/lock.cc:
- extend mysql_lock_tables to send error to the client if
thr_multi_lock returns a timeout or a deadlock error.
sql/mysqld.cc:
- add server option --table_lock_wait_timeout (in seconds)
sql/set_var.cc:
- add new global variable 'table_lock_wait_timeout' to specify
a wait timeout for table-level locks of MySQL (in seconds). The default
timeout is 50 seconds. The timeout is active only if the connection
has open cursors.
sql/sql_class.cc:
- implement Statement_map::close_transient_cursors
- safety suggests that we need an assert ensuring
llock_info->n_cursors is functioning properly, adjust destruction of
the Statement_map to allow such assert in THD::~THD
sql/sql_class.h:
- add support for Cursors registry to Statement map.
sql/sql_prepare.cc:
- maintain a list of cursors that must be closed at commit/rollback.
sql/sql_select.cc:
- extend class Cursor to support specific at-COMMIT/ROLLBACK behavior.
If a cursor uses tables of a storage engine that
invalidates all open tables at COMMIT/ROLLBACK, it must be closed
before COMMIT/ROLLBACK is executed.
sql/sql_select.h:
- add an own lock_id and commit/rollback status flag to class Cursor
tests/mysql_client_test.c:
A test case for Bug#10760 and complementary issues: test a simple
deadlock case too.
mysql-test/var:
New BitKeeper file ``mysql-test/var''
2005-07-19 22:21:12 +04:00
|
|
|
:handler(&tina_hton, table_arg),
|
|
|
|
/*
|
Table definition cache, part 2
2005-11-23 22:45:02 +02:00
|
|
|
These definitions are found in handler.h
|
|
|
|
They are probably not completely right.
|
A fix and a test case for Bug#10760 and complementary cleanups.
2005-07-19 22:21:12 +04:00
|
|
|
*/
|
2006-01-19 05:56:06 +03:00
|
|
|
current_position(0), next_position(0), local_saved_data_file_length(0),
|
|
|
|
chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH),
|
|
|
|
records_is_known(0)
|
A fix and a test case for Bug#10760 and complementary cleanups.
2005-07-19 22:21:12 +04:00
|
|
|
{
|
|
|
|
/* Set our original buffers from pre-allocated memory */
|
2006-04-04 09:59:19 +02:00
|
|
|
buffer.set((char*)byte_buffer, IO_SIZE, system_charset_info);
|
A fix and a test case for Bug#10760 and complementary cleanups.
2005-07-19 22:21:12 +04:00
|
|
|
chain= chain_buffer;
|
|
|
|
}
|
|
|
|
|
2006-01-19 05:56:06 +03:00
|
|
|
|
2004-08-12 20:57:18 -07:00
|
|
|
/*
|
|
|
|
Encode a buffer into the quoted format.
|
|
|
|
*/
|
Table definition cache, part 2
The table opening process now works the following way:
- Create common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
object and open a handler to the table object
Other noteworthy changes:
- In TABLE_SHARE the most common strings are now LEX_STRING's
- Better error message when table is not found
- Variable table_cache is now renamed 'table_open_cache'
- New variable 'table_definition_cache' that is the number of table definitions that will be cached
- strxnmov() calls are now fixed to avoid overflows
- strxnmov() will now always add one end \0 to result
- engine objects are now created with a TABLE_SHARE object instead of a TABLE object.
- After creating a field object one must call field->init(table) before using it
- For a busy system this change will give you:
- Less memory usage for table object
- Faster opening of tables (if it has been in use or is in the table definition cache)
- Allow you to cache many table definitions objects
- Faster drop of table
mysql-test/mysql-test-run.sh:
Fixed some problems with --gdb option
Test both with socket and tcp/ip port that all old servers are killed
mysql-test/r/flush_table.result:
More tests with lock table with 2 threads + flush table
mysql-test/r/information_schema.result:
Removed old (now wrong) result
mysql-test/r/innodb.result:
Better error messages (thanks to TDC patch)
mysql-test/r/merge.result:
Extra flush table test
mysql-test/r/ndb_bitfield.result:
Better error messages (thanks to TDC patch)
mysql-test/r/ndb_partition_error.result:
Better error messages (thanks to TDC patch)
mysql-test/r/query_cache.result:
Remove tables left from old tests
mysql-test/r/temp_table.result:
Test truncate with temporary tables
mysql-test/r/variables.result:
Table_cache -> Table_open_cache
mysql-test/t/flush_table.test:
More tests with lock table with 2 threads + flush table
mysql-test/t/merge.test:
Extra flush table test
mysql-test/t/multi_update.test:
Added 'sleep' to make test predictable
mysql-test/t/query_cache.test:
Remove tables left from old tests
mysql-test/t/temp_table.test:
Test truncate with temporary tables
mysql-test/t/variables.test:
Table_cache -> Table_open_cache
mysql-test/valgrind.supp:
Remove warning that may happen because threads die in different order
mysys/hash.c:
Fixed wrong DBUG_PRINT
mysys/mf_dirname.c:
More DBUG
mysys/mf_pack.c:
Better comment
mysys/mf_tempdir.c:
More DBUG
Ensure that we call cleanup_dirname() on all temporary directory paths.
If we don't do this, we will get a failure when comparing temporary table
names as in some cases the temporary table name is run through convert_dirname()
mysys/my_alloc.c:
Indentation fix
sql/examples/ha_example.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_example.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/field.cc:
Update for table definition cache:
- Field creation now takes TABLE_SHARE instead of TABLE as argument
(This is because field definitions are now cached in TABLE_SHARE)
When a field is created, one now must call field->init(TABLE) before using it
- Use s->db instead of s->table_cache_key
- Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
- make_field() takes TABLE_SHARE as argument instead of TABLE
- move_field() -> move_field_offset()
sql/field.h:
Update for table definition cache:
- Field creation now takes TABLE_SHARE instead of TABLE as argument
(This is because field definitions are now cached in TABLE_SHARE)
When a field is created, one now must call field->init(TABLE) before using it
- Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
- make_field() takes TABLE_SHARE as argument instead of TABLE
- move_field() -> move_field_offset()
sql/ha_archive.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_archive.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_berkeley.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Changed name of argument create() to not hide internal 'table' variable.
table->s -> table_share
sql/ha_berkeley.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_federated.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Fixed comments
Remove index variable and replace with pointers (simple optimization)
move_field() -> move_field_offset()
Removed some strlen() calls
sql/ha_federated.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_heap.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Simplify delete_table() and create() as the given file names are now without extension
sql/ha_heap.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisam.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Remove unneeded fn_format()
Fixed for new table->s structure
sql/ha_myisam.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisammrg.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Don't set 'is_view' for MERGE tables
Use new interface to find_temporary_table()
sql/ha_myisammrg.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Added flag HA_NO_COPY_ON_ALTER
sql/ha_ndbcluster.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Fixed wrong calls to strxnmov()
Give error HA_ERR_TABLE_DEF_CHANGED if table definition has changed
drop_table -> intern_drop_table()
table->s -> table_share
Move part_info to TABLE
Fixed comments & DBUG prints
New arguments to print_error()
sql/ha_ndbcluster.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_partition.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
We can't set up or use part_info when creating the handler as there is not yet any table object
New ha_intialise() to work with TDC (Done by Mikael)
sql/ha_partition.h:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
Got set_part_info() from Mikael
sql/handler.cc:
We now use TABLE_SHARE instead of TABLE when creating engine handlers
ha_delete_table() now also takes database as an argument
handler::ha_open() now takes TABLE as argument
ha_open() now calls ha_allocate_read_write_set()
Simplify ha_allocate_read_write_set()
Remove ha_deallocate_read_write_set()
Use table_share (Cached by table definition cache)
sql/handler.h:
New table flag: HA_NO_COPY_ON_ALTER (used by merge tables)
Remove ha_deallocate_read_write_set()
get_new_handler() now takes TABLE_SHARE as argument
ha_delete_table() now gets database as argument
sql/item.cc:
table_name and db are now LEX_STRING objects
When creating fields, we now have to call field->init(table)
move_field -> move_field_offset()
sql/item.h:
tmp_table_field_from_field_type() now takes an extra parameter 'fixed_length' to allow one to force usage of CHAR
instead of BLOB
sql/item_cmpfunc.cc:
Fixed call to tmp_table_field_from_field_type()
sql/item_create.cc:
Assert on a new unhandled cast type
sql/item_func.cc:
When creating fields, we now have to call field->init(table)
dummy_table used by 'sp' now needs a TABLE_SHARE object
sql/item_subselect.cc:
Trivial code cleanups
sql/item_sum.cc:
When creating fields, we now have to call field->init(table)
sql/item_timefunc.cc:
Item_func_str_to_date::tmp_table_field() now replaced by call to
tmp_table_field_from_field_type() (see item_timefunc.h)
sql/item_timefunc.h:
Simplify tmp_table_field()
sql/item_uniq.cc:
When creating fields, we now have to call field->init(table)
sql/key.cc:
Added 'KEY' argument to 'find_ref_key' to simplify code
sql/lock.cc:
More debugging
Use create_table_def_key() to create key for table cache
Allocate TABLE_SHARE properly when creating name lock
Fix that locked_table_name doesn't test same table twice
sql/mysql_priv.h:
New functions for table definition cache
New interfaces to a lot of functions.
New faster interface to find_temporary_table() and close_temporary_table()
sql/mysqld.cc:
Added support for table definition cache of size 'table_def_size'
Fixed some calls to strnmov()
Changed name of 'table_cache' to 'table_open_cache'
sql/opt_range.cc:
Use new interfaces
Fixed warnings from valgrind
sql/parse_file.cc:
Safer calls to strxnmov()
Fixed typo
sql/set_var.cc:
Added variable 'table_definition_cache'
Variable table_cache renamed to 'table_open_cache'
sql/slave.cc:
Use new interface
sql/sp.cc:
Proper use of TABLE_SHARE
sql/sp_head.cc:
Remove compiler warnings
We now have to call field->init(table)
sql/sp_head.h:
Pointers to parsed strings are now const
sql/sql_acl.cc:
table_name is now a LEX_STRING
sql/sql_base.cc:
Main implementation of table definition cache
(The #ifdef's are there for the future when table definition cache will replace open table cache)
Now table definitions are cached independently of open tables, which will speed things up when a table is in use from several places at once
Views are not yet cached; For the moment we only cache if a table is a view or not.
Faster implementation of find_temporary_table()
Replace 'wait_for_refresh()' with the more general function 'wait_for_condition()'
Drop table is slightly faster as we can use the table definition cache to know the type of the table
sql/sql_cache.cc:
table_cache_key and table_name are now LEX_STRING
DBUG print fixes
sql/sql_class.cc:
table_cache_key is now a LEX_STRING
safer strxnmov()
sql/sql_class.h:
Added number of open table shares (table definitions)
sql/sql_db.cc:
safer strxnmov()
sql/sql_delete.cc:
Use new interface to find_temporary_table()
sql/sql_derived.cc:
table_name is now a LEX_STRING
sql/sql_handler.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_insert.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_lex.cc:
Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_lex.h:
Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_load.cc:
Safer strxnmov()
sql/sql_parse.cc:
Better error if wrong DB name
sql/sql_partition.cc:
part_info moved to TABLE from TABLE_SHARE
Indentation changes
sql/sql_select.cc:
Indentation fixes
Call field->init(TABLE) for newly created fields
Update create_tmp_table() to use TABLE_SHARE properly
sql/sql_select.h:
Call field->init(TABLE) for newly created fields
sql/sql_show.cc:
table_name is now a LEX_STRING
part_info moved to TABLE
sql/sql_table.cc:
Use table definition cache to speed up delete of tables
Fixed calls to functions with new interfaces
Don't use 'share_not_to_be_used'
Instead of doing openfrm() when doing repair, we now have to call
get_table_share() followed by open_table_from_share().
Replace some fn_format() with faster unpack_filename().
Safer strxnmov()
part_info is now in TABLE
Added Mikaels patch for partition and ALTER TABLE
Instead of using 'TABLE_SHARE->is_view' use 'table_flags() & HA_NO_COPY_ON_ALTER'
sql/sql_test.cc:
table_name and table_cache_key are now LEX_STRING's
sql/sql_trigger.cc:
TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
safer strxnmov()
Removed compiler warnings
sql/sql_update.cc:
Call field->init(TABLE) after field is created
sql/sql_view.cc:
safer strxnmov()
Create common TABLE_SHARE object for views to allow us to cache if table is a view
sql/structs.h:
Added SHOW_TABLE_DEFINITIONS
sql/table.cc:
Creation and destruct of TABLE_SHARE objects that are common for many TABLE objects
The table opening process now works the following way:
- Create common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
object and open a handler to the table object
open_table_def() is written in such a way that it should be trivial to add parsing of the .frm files in new formats
sql/table.h:
TABLE objects for the same database table now share a common TABLE_SHARE object
In TABLE_SHARE the most common strings are now LEX_STRING's
sql/unireg.cc:
Changed arguments to rea_create_table() to have same order as other functions
Call field->init(table) for newly created fields
sql/unireg.h:
Added OPEN_VIEW
strings/strxnmov.c:
Change strxnmov() to always add end \0
This makes usage of strxnmov() safer as most of MySQL code assumes that strxnmov() will create a null-terminated string
2005-11-23 22:45:02 +02:00
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
int ha_tina::encode_quote(byte *buf)
|
2004-08-12 20:57:18 -07:00
|
|
|
{
|
|
|
|
char attribute_buffer[1024];
|
|
|
|
String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin);
|
|
|
|
|
|
|
|
buffer.length(0);
|
|
|
|
for (Field **field=table->field ; *field ; field++)
|
|
|
|
{
|
|
|
|
const char *ptr;
|
|
|
|
const char *end_ptr;
|
|
|
|
|
2006-03-06 21:03:17 +03:00
|
|
|
/*
|
|
|
|
Write an empty string to the buffer in case of a NULL value.
|
|
|
|
Basically this is a safety check, as no one ensures that the
|
|
|
|
field content is cleaned up every time we use Field::set_null()
|
|
|
|
in the code.
|
|
|
|
*/
|
|
|
|
if ((*field)->is_null())
|
|
|
|
ptr= end_ptr= 0;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
(*field)->val_str(&attribute,&attribute);
|
|
|
|
ptr= attribute.ptr();
|
|
|
|
end_ptr= attribute.length() + ptr;
|
|
|
|
}
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
buffer.append('"');
|
|
|
|
|
|
|
|
while (ptr < end_ptr)
|
|
|
|
{
|
|
|
|
if (*ptr == '"')
|
|
|
|
{
|
|
|
|
buffer.append('\\');
|
|
|
|
buffer.append('"');
|
|
|
|
ptr++;
|
|
|
|
}
|
|
|
|
else if (*ptr == '\r')
|
|
|
|
{
|
|
|
|
buffer.append('\\');
|
|
|
|
buffer.append('r');
|
|
|
|
ptr++;
|
|
|
|
}
|
|
|
|
else if (*ptr == '\\')
|
|
|
|
{
|
|
|
|
buffer.append('\\');
|
|
|
|
buffer.append('\\');
|
|
|
|
ptr++;
|
|
|
|
}
|
|
|
|
else if (*ptr == '\n')
|
|
|
|
{
|
|
|
|
buffer.append('\\');
|
|
|
|
buffer.append('n');
|
|
|
|
ptr++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
buffer.append(*ptr++);
|
|
|
|
}
|
|
|
|
buffer.append('"');
|
|
|
|
buffer.append(',');
|
|
|
|
}
|
|
|
|
// Remove the comma, add a line feed
|
|
|
|
buffer.length(buffer.length() - 1);
|
|
|
|
buffer.append('\n');
|
|
|
|
//buffer.replace(buffer.length(), 0, "\n", 1);
|
|
|
|
|
|
|
|
return (buffer.length());
|
|
|
|
}
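/*
  Editor's sketch (not part of the engine): the same quoting rules that
  encode_quote() above applies, written against plain std::string instead
  of MySQL's String class. The name tina_quote_field and the
  TINA_DOC_EXAMPLES guard are hypothetical.
*/
#ifdef TINA_DOC_EXAMPLES
#include <string>

static std::string tina_quote_field(const std::string &val)
{
  std::string out;
  out.push_back('"');
  for (std::string::size_type i= 0; i < val.size(); i++)
  {
    switch (val[i])
    {
    case '"':  out+= "\\\""; break;          /* embedded quote  -> \" */
    case '\r': out+= "\\r";  break;          /* carriage return -> \r */
    case '\\': out+= "\\\\"; break;          /* backslash       -> \\ */
    case '\n': out+= "\\n";  break;          /* line feed       -> \n */
    default:   out.push_back(val[i]);
    }
  }
  out.push_back('"');
  return out;                                /* e.g. a"b  ->  "a\"b"  */
}
#endif /* TINA_DOC_EXAMPLES */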
|
|
|
|
|
|
|
|
/*
|
2005-09-29 01:00:47 +04:00
|
|
|
chain_append() adds delete positions to the chain that we use to keep
|
2006-03-29 06:28:57 +04:00
|
|
|
track of space. Then the chain will be used to clean up "holes" that occurred
|
2005-09-29 01:00:47 +04:00
|
|
|
due to deletes and updates.
|
2004-08-12 20:57:18 -07:00
|
|
|
*/
|
|
|
|
int ha_tina::chain_append()
|
|
|
|
{
|
|
|
|
if ( chain_ptr != chain && (chain_ptr -1)->end == current_position)
|
|
|
|
(chain_ptr -1)->end= next_position;
|
2005-09-29 01:00:47 +04:00
|
|
|
else
|
2004-08-12 20:57:18 -07:00
|
|
|
{
|
|
|
|
/* We set up for the next position */
|
|
|
|
if ((off_t)(chain_ptr - chain) == (chain_size -1))
|
|
|
|
{
|
|
|
|
off_t location= chain_ptr - chain;
|
|
|
|
chain_size += DEFAULT_CHAIN_LENGTH;
|
|
|
|
if (chain_alloced)
|
|
|
|
{
|
|
|
|
/* Must cast since my_malloc, unlike malloc, doesn't return a void pointer */
|
2005-09-29 01:00:47 +04:00
|
|
|
if ((chain= (tina_set *) my_realloc((gptr)chain,
|
|
|
|
chain_size, MYF(MY_WME))) == NULL)
|
2004-08-12 20:57:18 -07:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2005-09-29 01:00:47 +04:00
|
|
|
tina_set *ptr= (tina_set *) my_malloc(chain_size * sizeof(tina_set),
|
|
|
|
MYF(MY_WME));
|
2004-08-12 20:57:18 -07:00
|
|
|
memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set));
|
|
|
|
chain= ptr;
|
|
|
|
chain_alloced++;
|
|
|
|
}
|
|
|
|
chain_ptr= chain + location;
|
|
|
|
}
|
|
|
|
chain_ptr->begin= current_position;
|
|
|
|
chain_ptr->end= next_position;
|
|
|
|
chain_ptr++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
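/*
  Editor's sketch (not part of the engine): the bookkeeping idea behind
  chain_append() above -- remember deleted byte ranges and merge a range
  that starts exactly where the previous one ended. It uses std::vector
  instead of the hand-grown my_malloc/my_realloc array; hole_t and
  note_hole are hypothetical names.
*/
#ifdef TINA_DOC_EXAMPLES
#include <sys/types.h>
#include <vector>

struct hole_t { off_t begin, end; };

static void note_hole(std::vector<hole_t> &chain, off_t begin, off_t end)
{
  if (!chain.empty() && chain.back().end == begin)
    chain.back().end= end;                 /* adjacent hole: just extend it */
  else
  {
    hole_t h= { begin, end };              /* otherwise record a new hole */
    chain.push_back(h);
  }
}
#endif /* TINA_DOC_EXAMPLES */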
|
|
|
|
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
2004-08-12 20:57:18 -07:00
|
|
|
Scans for a row.
|
|
|
|
*/
|
|
|
|
int ha_tina::find_current_row(byte *buf)
|
|
|
|
{
|
2006-01-19 05:56:06 +03:00
|
|
|
byte *mapped_ptr;
|
2004-08-12 20:57:18 -07:00
|
|
|
byte *end_ptr;
|
|
|
|
DBUG_ENTER("ha_tina::find_current_row");
|
|
|
|
|
2006-01-19 05:56:06 +03:00
|
|
|
mapped_ptr= (byte *)share->mapped_file + current_position;
|
|
|
|
|
|
|
|
/*
|
|
|
|
We do not read further than local_saved_data_file_length in order
|
|
|
|
not to conflict with an ongoing concurrent insert.
|
|
|
|
*/
|
2005-09-29 01:00:47 +04:00
|
|
|
if ((end_ptr= find_eoln(share->mapped_file, current_position,
|
2006-01-19 05:56:06 +03:00
|
|
|
local_saved_data_file_length)) == 0)
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(HA_ERR_END_OF_FILE);
|
|
|
|
|
|
|
|
for (Field **field=table->field ; *field ; field++)
|
|
|
|
{
|
|
|
|
buffer.length(0);
|
2006-03-13 19:36:34 +03:00
|
|
|
if (*mapped_ptr == '"')
|
|
|
|
mapped_ptr++; // Increment past the first quote
|
|
|
|
else
|
|
|
|
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
|
2004-08-12 20:57:18 -07:00
|
|
|
for(;mapped_ptr != end_ptr; mapped_ptr++)
|
|
|
|
{
|
2005-09-29 01:00:47 +04:00
|
|
|
// Need to convert line feeds!
|
|
|
|
if (*mapped_ptr == '"' &&
|
|
|
|
(((mapped_ptr[1] == ',') && (mapped_ptr[2] == '"')) ||
|
|
|
|
(mapped_ptr == end_ptr -1 )))
|
2004-08-12 20:57:18 -07:00
|
|
|
{
|
|
|
|
mapped_ptr += 2; // Move past the , and the "
|
|
|
|
break;
|
2005-09-29 01:00:47 +04:00
|
|
|
}
|
|
|
|
if (*mapped_ptr == '\\' && mapped_ptr != (end_ptr - 1))
|
2004-08-12 20:57:18 -07:00
|
|
|
{
|
|
|
|
mapped_ptr++;
|
|
|
|
if (*mapped_ptr == 'r')
|
|
|
|
buffer.append('\r');
|
|
|
|
else if (*mapped_ptr == 'n' )
|
|
|
|
buffer.append('\n');
|
|
|
|
else if ((*mapped_ptr == '\\') || (*mapped_ptr == '"'))
|
|
|
|
buffer.append(*mapped_ptr);
|
|
|
|
else /* This could only happen with an externally created file */
|
|
|
|
{
|
|
|
|
buffer.append('\\');
|
|
|
|
buffer.append(*mapped_ptr);
|
|
|
|
}
|
2005-09-29 01:00:47 +04:00
|
|
|
}
|
2006-03-13 19:36:34 +03:00
|
|
|
else // ordinary symbol
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
We are at the final symbol and no closing quote was found =>
|
|
|
|
we are working with a damaged file.
|
|
|
|
*/
|
|
|
|
if (mapped_ptr == end_ptr -1)
|
|
|
|
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
|
2004-08-12 20:57:18 -07:00
|
|
|
buffer.append(*mapped_ptr);
|
2006-03-13 19:36:34 +03:00
|
|
|
}
|
2004-08-12 20:57:18 -07:00
|
|
|
}
|
|
|
|
(*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
|
|
|
|
}
|
|
|
|
next_position= (end_ptr - share->mapped_file)+1;
|
|
|
|
/* Maybe use \N for null? */
|
2005-01-06 18:34:17 -08:00
|
|
|
memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
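/*
  Editor's sketch (not part of the engine): the inverse of the quoting shown
  earlier -- decoding the backslash escapes that find_current_row() above
  understands. tina_unquote_field is a hypothetical name; the input is the
  field body without the surrounding quotes.
*/
#ifdef TINA_DOC_EXAMPLES
#include <string>

static std::string tina_unquote_field(const std::string &body)
{
  std::string out;
  for (std::string::size_type i= 0; i < body.size(); i++)
  {
    if (body[i] == '\\' && i + 1 < body.size())
    {
      char next= body[++i];
      if (next == 'r')                      out.push_back('\r');
      else if (next == 'n')                 out.push_back('\n');
      else if (next == '\\' || next == '"') out.push_back(next);
      else
      {
        out.push_back('\\');                /* unknown escape: keep as-is */
        out.push_back(next);
      }
    }
    else
      out.push_back(body[i]);
  }
  return out;
}
#endif /* TINA_DOC_EXAMPLES */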
|
|
|
|
|
|
|
|
/*
|
|
|
|
If frm_error() is called in table.cc this is called to find out what file
|
|
|
|
extensions exist for this handler.
|
|
|
|
*/
|
2005-04-27 11:25:08 +02:00
|
|
|
static const char *ha_tina_exts[] = {
|
2006-03-13 19:36:34 +03:00
|
|
|
CSV_EXT,
|
|
|
|
CSM_EXT,
|
2005-04-27 11:25:08 +02:00
|
|
|
NullS
|
|
|
|
};
|
|
|
|
|
2004-08-12 20:57:18 -07:00
|
|
|
const char **ha_tina::bas_ext() const
|
2005-04-27 11:25:08 +02:00
|
|
|
{
|
|
|
|
return ha_tina_exts;
|
|
|
|
}
|
2004-08-12 20:57:18 -07:00
|
|
|
|
2006-01-19 05:56:06 +03:00
|
|
|
/*
|
|
|
|
The three functions below are needed to enable concurrent insert functionality
|
|
|
|
for the CSV engine. For more details see mysys/thr_lock.c
|
|
|
|
*/
|
|
|
|
|
|
|
|
void tina_get_status(void* param, int concurrent_insert)
|
|
|
|
{
|
|
|
|
ha_tina *tina= (ha_tina*) param;
|
|
|
|
tina->get_status();
|
|
|
|
}
|
|
|
|
|
|
|
|
void tina_update_status(void* param)
|
|
|
|
{
|
|
|
|
ha_tina *tina= (ha_tina*) param;
|
|
|
|
tina->update_status();
|
|
|
|
}
|
|
|
|
|
|
|
|
/* this should exist and return 0 for concurrent insert to work */
|
|
|
|
my_bool tina_check_status(void* param)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Save the state of the table
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
get_status()
|
|
|
|
|
|
|
|
DESCRIPTION
|
|
|
|
This function is used to retrieve the file length during the lock
|
|
|
|
phase of a concurrent insert. For more details see the comment to
|
|
|
|
ha_tina::update_status below.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void ha_tina::get_status()
|
|
|
|
{
|
|
|
|
if (share->is_log_table)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
We have to use mutex to follow pthreads memory visibility
|
|
|
|
rules for share->saved_data_file_length
|
|
|
|
*/
|
|
|
|
pthread_mutex_lock(&share->mutex);
|
|
|
|
local_saved_data_file_length= share->saved_data_file_length;
|
|
|
|
pthread_mutex_unlock(&share->mutex);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
local_saved_data_file_length= share->saved_data_file_length;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
Correct the state of the table. Called by unlock routines
|
|
|
|
before the write lock is released.
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
update_status()
|
|
|
|
|
|
|
|
DESCRIPTION
|
|
|
|
When we employ concurrent insert lock, we save current length of the file
|
|
|
|
during the lock phase. We do not read past the saved value, as we don't
|
|
|
|
want to interfere with an ongoing concurrent insert. Writers update the file
|
|
|
|
length info during unlock with update_status().
|
|
|
|
|
|
|
|
NOTE
|
|
|
|
For log tables concurrent insert works differently. The reason is that
|
|
|
|
log tables are always opened and locked. And as they do not unlock
|
|
|
|
tables, the file length after writes should be updated in a different
|
|
|
|
way. For this purpose we need the is_log_table flag. When this flag is set
|
|
|
|
we call update_status() explicitly after each row write.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void ha_tina::update_status()
|
|
|
|
{
|
|
|
|
/* correct local_saved_data_file_length for writers */
|
|
|
|
share->saved_data_file_length= share->file_stat.st_size;
|
|
|
|
}
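/*
  Editor's sketch (not part of the engine): the "snapshot at lock, publish
  at unlock" protocol that get_status()/update_status() above implement.
  shared_state_t, reader_snapshot and writer_publish are hypothetical names;
  in the engine the shared length lives in TINA_SHARE and the snapshot in
  the handler object. The mutex is assumed to be initialized elsewhere.
*/
#ifdef TINA_DOC_EXAMPLES
#include <pthread.h>
#include <sys/types.h>

struct shared_state_t
{
  pthread_mutex_t mutex;
  off_t saved_data_file_length;        /* last published end of data */
};

/* Called when a reader acquires its lock: freeze the visible length. */
static off_t reader_snapshot(shared_state_t *s)
{
  pthread_mutex_lock(&s->mutex);
  off_t len= s->saved_data_file_length;
  pthread_mutex_unlock(&s->mutex);
  return len;                          /* scans must not read past this */
}

/* Called when a writer releases its lock: publish the new length. */
static void writer_publish(shared_state_t *s, off_t new_length)
{
  pthread_mutex_lock(&s->mutex);
  s->saved_data_file_length= new_length;
  pthread_mutex_unlock(&s->mutex);
}
#endif /* TINA_DOC_EXAMPLES */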
|
|
|
|
|
|
|
|
|
2006-01-20 00:40:56 +03:00
|
|
|
bool ha_tina::check_if_locking_is_allowed(uint sql_command,
|
|
|
|
ulong type, TABLE *table,
|
|
|
|
uint count,
|
|
|
|
bool called_by_logger_thread)
|
2006-01-19 05:56:06 +03:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
Deny locking of the log tables, which is incompatible with
|
|
|
|
concurrent insert, unless called from a logger THD:
|
|
|
|
general_log_thd or slow_log_thd.
|
|
|
|
*/
|
|
|
|
if (table->s->log_table &&
|
2006-01-20 00:40:56 +03:00
|
|
|
sql_command != SQLCOM_TRUNCATE &&
|
|
|
|
!(sql_command == SQLCOM_FLUSH &&
|
|
|
|
type & REFRESH_LOG) &&
|
|
|
|
!called_by_logger_thread &&
|
2006-01-19 05:56:06 +03:00
|
|
|
(table->reginfo.lock_type >= TL_READ_NO_INSERT))
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
The check >= TL_READ_NO_INSERT denies all write locks
|
|
|
|
plus the only read lock (TL_READ_NO_INSERT itself)
|
|
|
|
*/
|
|
|
|
table->reginfo.lock_type == TL_READ_NO_INSERT ?
|
|
|
|
my_error(ER_CANT_READ_LOCK_LOG_TABLE, MYF(0)) :
|
|
|
|
my_error(ER_CANT_WRITE_LOCK_LOG_TABLE, MYF(0));
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
return TRUE;
|
|
|
|
}
|
2004-08-12 20:57:18 -07:00
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
2004-08-12 20:57:18 -07:00
|
|
|
Open a database file. Keep in mind that tables are caches, so
|
|
|
|
this will not be called for every request. Any sort of positions
|
|
|
|
that need to be reset should be kept in the ::extra() call.
|
|
|
|
*/
|
2006-03-13 19:36:34 +03:00
|
|
|
int ha_tina::open(const char *name, int mode, uint open_options)
|
2004-08-12 20:57:18 -07:00
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::open");
|
|
|
|
|
|
|
|
if (!(share= get_share(name, table)))
|
2006-03-29 06:28:57 +04:00
|
|
|
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
|
2006-03-13 19:36:34 +03:00
|
|
|
|
2006-03-29 06:28:57 +04:00
|
|
|
if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR))
|
2006-03-13 19:36:34 +03:00
|
|
|
{
|
|
|
|
free_share(share);
|
2006-03-29 06:28:57 +04:00
|
|
|
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
|
2006-03-13 19:36:34 +03:00
|
|
|
}
|
2006-01-19 05:56:06 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
Init locking. Pass handler object to the locking routines,
|
|
|
|
so that they could save/update local_saved_data_file_length value
|
|
|
|
during locking. This is needed to enable concurrent inserts.
|
|
|
|
*/
|
|
|
|
thr_lock_data_init(&share->lock, &lock, (void*) this);
|
2004-08-12 20:57:18 -07:00
|
|
|
ref_length=sizeof(off_t);
|
|
|
|
|
2006-01-19 05:56:06 +03:00
|
|
|
share->lock.get_status= tina_get_status;
|
|
|
|
share->lock.update_status= tina_update_status;
|
|
|
|
share->lock.check_status= tina_check_status;
|
|
|
|
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
Close a database file. We remove ourselves from the shared structure.
|
|
|
|
If it is empty we destroy it and free the mapped file.
|
|
|
|
*/
|
|
|
|
int ha_tina::close(void)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::close");
|
|
|
|
DBUG_RETURN(free_share(share));
|
|
|
|
}
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
2004-08-12 20:57:18 -07:00
|
|
|
This is an INSERT. At the moment this handler just seeks to the end
|
|
|
|
of the file and appends the data. In an error case it really should
|
|
|
|
just truncate to the original position (this is not done yet).
|
|
|
|
*/
|
|
|
|
int ha_tina::write_row(byte * buf)
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
DBUG_ENTER("ha_tina::write_row");
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
if (share->crashed)
|
|
|
|
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
|
|
|
|
|
2006-01-20 00:40:56 +03:00
|
|
|
ha_statistic_increment(&SSV::ha_write_count);
|
2004-08-12 20:57:18 -07:00
|
|
|
|
2004-10-18 10:32:52 +04:00
|
|
|
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
|
|
|
|
table->timestamp_field->set_time();
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
size= encode_quote(buf);
|
|
|
|
|
2006-04-04 09:59:19 +02:00
|
|
|
if (my_write(share->data_file, (byte*)buffer.ptr(), size,
|
|
|
|
MYF(MY_WME | MY_NABP)))
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
|
|
|
Ok, this means that we will be doing potentially bad things
|
2004-08-12 20:57:18 -07:00
|
|
|
during a bulk insert on some OS'es. What we need is a cleanup
|
|
|
|
call for ::write_row that would let us fix up everything after the bulk
|
|
|
|
insert. The archive handler does this with an extra mutex call, which
|
|
|
|
might be a solution for this.
|
|
|
|
*/
|
2005-09-29 01:00:47 +04:00
|
|
|
if (get_mmap(share, 0) > 0)
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
2006-01-19 05:56:06 +03:00
|
|
|
|
|
|
|
/* update local copy of the max position to see our own changes */
|
|
|
|
local_saved_data_file_length= share->file_stat.st_size;
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
/* update shared info */
|
|
|
|
pthread_mutex_lock(&share->mutex);
|
|
|
|
share->rows_recorded++;
|
2006-01-19 05:56:06 +03:00
|
|
|
/* update status for the log tables */
|
|
|
|
if (share->is_log_table)
|
|
|
|
update_status();
|
2006-03-13 19:36:34 +03:00
|
|
|
pthread_mutex_unlock(&share->mutex);
|
2006-01-19 05:56:06 +03:00
|
|
|
|
2005-11-17 22:52:31 +01:00
|
|
|
records++;
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
2004-08-12 20:57:18 -07:00
|
|
|
This is called for an update.
|
2005-09-29 01:00:47 +04:00
|
|
|
Make sure you put in code to increment the auto increment, also
|
2004-08-12 20:57:18 -07:00
|
|
|
update any timestamp data. Currently auto increment is not being
|
|
|
|
fixed since autoincrements have yet to be added to this table handler.
|
|
|
|
This will be called in a table scan right before the previous ::rnd_next()
|
|
|
|
call.
|
|
|
|
*/
|
|
|
|
int ha_tina::update_row(const byte * old_data, byte * new_data)
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
DBUG_ENTER("ha_tina::update_row");
|
|
|
|
|
2006-01-20 00:40:56 +03:00
|
|
|
ha_statistic_increment(&SSV::ha_update_count);
|
2004-08-12 20:57:18 -07:00
|
|
|
|
2004-10-18 10:32:52 +04:00
|
|
|
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
|
|
|
|
table->timestamp_field->set_time();
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
size= encode_quote(new_data);
|
|
|
|
|
|
|
|
if (chain_append())
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
2006-04-04 09:59:19 +02:00
|
|
|
if (my_write(share->data_file, (byte*)buffer.ptr(), size,
|
|
|
|
MYF(MY_WME | MY_NABP)))
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
2006-01-19 05:56:06 +03:00
|
|
|
|
|
|
|
/* UPDATE should never happen on the log tables */
|
|
|
|
DBUG_ASSERT(!share->is_log_table);
|
|
|
|
|
|
|
|
/* update local copy of the max position to see our own changes */
|
|
|
|
local_saved_data_file_length= share->file_stat.st_size;
|
|
|
|
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
|
|
|
Deletes a row. First the database will find the row, and then call this
|
|
|
|
method. In the case of a table scan, the previous call to this will be
|
|
|
|
the ::rnd_next() that found this row.
|
|
|
|
The exception to this is an ORDER BY. This will cause the table handler
|
|
|
|
to walk the table noting the positions of all rows that match a query.
|
|
|
|
The table will then be deleted/positioned based on the ORDER (so RANDOM,
|
|
|
|
DESC, ASC).
|
2004-08-12 20:57:18 -07:00
|
|
|
*/
|
|
|
|
int ha_tina::delete_row(const byte * buf)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::delete_row");
|
2006-01-20 00:40:56 +03:00
|
|
|
ha_statistic_increment(&SSV::ha_delete_count);
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
if (chain_append())
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
|
|
|
--records;
|
|
|
|
|
2006-01-19 05:56:06 +03:00
|
|
|
/* DELETE should never happen on the log table */
|
|
|
|
DBUG_ASSERT(!share->is_log_table);
|
|
|
|
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
|
|
|
All table scans call this first.
|
2004-08-12 20:57:18 -07:00
|
|
|
The order of a table scan is:
|
|
|
|
|
|
|
|
ha_tina::store_lock
|
|
|
|
ha_tina::external_lock
|
|
|
|
ha_tina::info
|
|
|
|
ha_tina::rnd_init
|
|
|
|
ha_tina::extra
|
|
|
|
ENUM HA_EXTRA_CACHE Cache record in HA_rrnd()
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::rnd_next
|
|
|
|
ha_tina::extra
|
|
|
|
ENUM HA_EXTRA_NO_CACHE End caching of records (def)
|
|
|
|
ha_tina::external_lock
|
|
|
|
ha_tina::extra
|
|
|
|
ENUM HA_EXTRA_RESET Reset database to after open
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
Each call to ::rnd_next() represents a row returned in the scan. When no more
|
|
|
|
rows can be returned, rnd_next() returns a value of HA_ERR_END_OF_FILE.
|
2004-08-12 20:57:18 -07:00
|
|
|
The ::info() call is just for the optimizer.
|
|
|
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
int ha_tina::rnd_init(bool scan)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::rnd_init");
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
if (share->crashed)
|
|
|
|
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
|
|
|
|
|
2004-08-12 20:57:18 -07:00
|
|
|
current_position= next_position= 0;
|
|
|
|
records= 0;
|
2005-11-17 22:52:31 +01:00
|
|
|
records_is_known= 0;
|
2004-08-12 20:57:18 -07:00
|
|
|
chain_ptr= chain;
|
2005-08-31 10:08:55 -07:00
|
|
|
#ifdef HAVE_MADVISE
|
2005-09-07 10:30:06 -07:00
|
|
|
if (scan)
|
2005-09-29 01:00:47 +04:00
|
|
|
(void) madvise(share->mapped_file, share->file_stat.st_size,
|
|
|
|
MADV_SEQUENTIAL);
|
2005-08-25 01:02:40 +02:00
|
|
|
#endif
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-09-29 01:00:47 +04:00
|
|
|
::rnd_next() does all the heavy lifting for a table scan. You will need to
|
|
|
|
populate *buf with the correct field data. You can walk the field to
|
|
|
|
determine at what position you should store the data (take a look at how
|
|
|
|
::find_current_row() works). The structure is something like:
|
2004-08-12 20:57:18 -07:00
|
|
|
0Foo Dog Friend
|
2005-09-29 01:00:47 +04:00
|
|
|
The first offset is for the first attribute. All space before that is
|
|
|
|
reserved for null count.
|
|
|
|
Basically this works as a mask for which fields are nulled (compared to just
|
|
|
|
empty).
|
|
|
|
This table handler doesn't do nulls and does not know the difference between
|
|
|
|
NULL and "". This is ok since this table handler is for spreadsheets and
|
|
|
|
they don't know about them either :)
|
2004-08-12 20:57:18 -07:00
|
|
|
*/
|
|
|
|
int ha_tina::rnd_next(byte *buf)
|
|
|
|
{
|
2006-03-13 19:36:34 +03:00
|
|
|
int rc;
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_ENTER("ha_tina::rnd_next");
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
if (share->crashed)
|
|
|
|
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
|
|
|
|
|
2006-01-20 00:40:56 +03:00
|
|
|
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
current_position= next_position;
|
2005-09-29 01:00:47 +04:00
|
|
|
if (!share->mapped_file)
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(HA_ERR_END_OF_FILE);
|
2006-03-13 19:36:34 +03:00
|
|
|
if ((rc= find_current_row(buf)))
|
|
|
|
DBUG_RETURN(rc);
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
records++;
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
In the case of an ORDER BY, rows will need to be sorted.
|
2005-09-29 01:00:47 +04:00
|
|
|
::position() is called after each call to ::rnd_next(),
|
2004-08-12 20:57:18 -07:00
|
|
|
the data it stores goes into a byte array. You can store this
|
2005-09-29 01:00:47 +04:00
|
|
|
data via my_store_ptr(). ref_length is a variable defined to the
|
|
|
|
class that is the sizeof() of position being stored. In our case
|
|
|
|
it's just a position. Look at the bdb code if you want to see a case
|
2004-08-12 20:57:18 -07:00
|
|
|
where something other than a number is stored.
|
|
|
|
*/
|
|
|
|
void ha_tina::position(const byte *record)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::position");
|
2005-02-19 10:51:49 +01:00
|
|
|
my_store_ptr(ref, ref_length, current_position);
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
|
|
|
Used to fetch a row from a position stored with ::position().
|
2005-02-19 10:51:49 +01:00
|
|
|
my_get_ptr() retrieves the data for you.
|
2004-08-12 20:57:18 -07:00
|
|
|
*/
|
|
|
|
|
|
|
|
int ha_tina::rnd_pos(byte * buf, byte *pos)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::rnd_pos");
|
2006-01-20 00:40:56 +03:00
|
|
|
ha_statistic_increment(&SSV::ha_read_rnd_count);
|
2005-02-19 10:51:49 +01:00
|
|
|
current_position= my_get_ptr(pos,ref_length);
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(find_current_row(buf));
|
|
|
|
}
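/*
  Editor's sketch (not part of the engine): what ::position() and
  ::rnd_pos() above boil down to -- the row "reference" is just the byte
  offset of the row, copied into and out of the ref buffer
  (ref_length == sizeof(off_t)). save_position/load_position are
  hypothetical stand-ins for my_store_ptr()/my_get_ptr().
*/
#ifdef TINA_DOC_EXAMPLES
#include <string.h>
#include <sys/types.h>

static void save_position(unsigned char *ref, off_t current_position)
{
  memcpy(ref, &current_position, sizeof(off_t));
}

static off_t load_position(const unsigned char *ref)
{
  off_t pos;
  memcpy(&pos, ref, sizeof(off_t));
  return pos;                   /* seek here, then re-parse the row */
}
#endif /* TINA_DOC_EXAMPLES */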
|
|
|
|
|
|
|
|
/*
|
|
|
|
::info() is used to return information to the optimizer.
|
|
|
|
Currently this table handler doesn't implement most of the fields
|
|
|
|
really needed. SHOW also makes use of this data
|
|
|
|
*/
|
|
|
|
void ha_tina::info(uint flag)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::info");
|
|
|
|
/* This is a lie, but you don't want the optimizer to see zero or 1 */
|
2005-11-17 22:52:31 +01:00
|
|
|
if (!records_is_known && records < 2)
|
2004-08-12 20:57:18 -07:00
|
|
|
records= 2;
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Grab bag of flags that are sent to the table handler every so often.
|
|
|
|
HA_EXTRA_RESET and HA_EXTRA_RESET_STATE are the most frequently called.
|
|
|
|
You are not required to implement any of these.
|
|
|
|
*/
|
|
|
|
int ha_tina::extra(enum ha_extra_function operation)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::extra");
|
2006-01-19 05:56:06 +03:00
|
|
|
if (operation == HA_EXTRA_MARK_AS_LOG_TABLE)
|
|
|
|
{
|
|
|
|
pthread_mutex_lock(&share->mutex);
|
|
|
|
share->is_log_table= TRUE;
|
|
|
|
pthread_mutex_unlock(&share->mutex);
|
|
|
|
}
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-09-29 01:00:47 +04:00
|
|
|
Called after each table scan. In particular after deletes,
|
|
|
|
and updates. In the last case we employ a chain of deleted
|
|
|
|
slots to clean up all of the dead space we have collected while
|
|
|
|
performing deletes/updates.
|
2004-08-12 20:57:18 -07:00
|
|
|
*/
|
|
|
|
int ha_tina::rnd_end()
|
|
|
|
{
|
|
|
|
DBUG_ENTER("ha_tina::rnd_end");
|
|
|
|
|
2005-11-17 22:52:31 +01:00
|
|
|
records_is_known= 1;
|
|
|
|
|
2004-08-12 20:57:18 -07:00
|
|
|
/* First position will be truncate position, second will be increment */
|
|
|
|
if ((chain_ptr - chain) > 0)
|
|
|
|
{
|
|
|
|
tina_set *ptr;
|
2006-04-04 09:59:19 +02:00
|
|
|
size_t length;
|
2004-08-12 20:57:18 -07:00
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
2004-08-12 20:57:18 -07:00
|
|
|
Set up a writable map; after the get_mmap() call it will contain all of
|
|
|
|
the data that we have added to the file.
|
|
|
|
*/
|
2005-09-29 01:00:47 +04:00
|
|
|
if (get_mmap(share, 1) > 0)
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
length= share->file_stat.st_size;
|
|
|
|
|
|
|
|
/*
|
|
|
|
The sort handles updates/deletes with random orders.
|
|
|
|
It also sorts so that we move the final blocks to the
|
|
|
|
beginning so that we move the smallest amount of data possible.
|
|
|
|
*/
|
2005-09-29 01:00:47 +04:00
|
|
|
qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set),
|
|
|
|
(qsort_cmp)sort_set);
|
2004-08-12 20:57:18 -07:00
|
|
|
for (ptr= chain; ptr < chain_ptr; ptr++)
|
|
|
|
{
|
2005-11-05 15:08:15 +03:00
|
|
|
memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end,
|
|
|
|
length - (size_t)ptr->end);
|
2004-08-12 20:57:18 -07:00
|
|
|
length= length - (size_t)(ptr->end - ptr->begin);
|
|
|
|
}
|
|
|
|
|
2006-04-04 09:59:19 +02:00
|
|
|
/* Unmap the file before the new size is set */
|
|
|
|
if (my_munmap(share->mapped_file, share->file_stat.st_size))
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
2006-04-04 09:59:19 +02:00
|
|
|
/* We set it to null so that get_mmap() won't try to unmap it */
|
|
|
|
share->mapped_file= NULL;
|
2004-08-12 20:57:18 -07:00
|
|
|
|
2006-04-04 09:59:19 +02:00
|
|
|
/* Set the file to the new size */
|
|
|
|
if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
if (get_mmap(share, 0) > 0)
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
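/*
  Editor's sketch (not part of the engine): the compaction step that
  rnd_end() above performs on the mmap'ed file, shown on a plain in-memory
  buffer, with offsets adjusted as earlier holes are removed. Holes must be
  sorted by begin offset and must not overlap; compact_holes is a
  hypothetical name and hole_t is the struct from the earlier chain sketch.
*/
#ifdef TINA_DOC_EXAMPLES
#include <string.h>
#include <vector>

/* Returns the new length of buf after squeezing out every hole. */
static size_t compact_holes(char *buf, size_t length,
                            const std::vector<hole_t> &holes)
{
  size_t shift= 0;                       /* bytes removed so far */
  for (size_t i= 0; i < holes.size(); i++)
  {
    size_t begin= (size_t) holes[i].begin - shift;
    size_t end=   (size_t) holes[i].end   - shift;
    memmove(buf + begin, buf + end, length - end);
    length-= (end - begin);
    shift+=  (end - begin);
  }
  return length;                         /* caller truncates the file here */
}
#endif /* TINA_DOC_EXAMPLES */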
|
|
|
|
|
2005-11-24 02:56:12 +02:00
|
|
|
|
2006-03-29 06:28:57 +04:00
|
|
|
/*
|
|
|
|
Repair a CSV table in case it is crashed.
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
repair()
|
|
|
|
thd The thread, performing repair
|
|
|
|
check_opt The options for repair. We do not use it currently.
|
|
|
|
|
|
|
|
DESCRIPTION
|
|
|
|
If the file is empty, change # of rows in the file and complete recovery.
|
|
|
|
Otherwise, scan the table looking for bad rows. If none were found,
|
|
|
|
we mark file as a good one and return. If a bad row was encountered,
|
|
|
|
we truncate the datafile up to the last good row.
|
|
|
|
|
|
|
|
TODO: Make repair more clever - it should try to recover subsequent
|
|
|
|
rows (after the first bad one) as well.
|
|
|
|
*/
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
|
|
|
|
{
|
|
|
|
char repaired_fname[FN_REFLEN];
|
|
|
|
byte *buf;
|
|
|
|
File repair_file;
|
|
|
|
int rc;
|
|
|
|
ha_rows rows_repaired= 0;
|
|
|
|
DBUG_ENTER("ha_tina::repair");
|
|
|
|
|
|
|
|
/* empty file */
|
|
|
|
if (!share->mapped_file)
|
|
|
|
{
|
|
|
|
share->rows_recorded= 0;
|
|
|
|
goto end;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
|
|
|
|
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
|
|
|
|
|
|
|
|
/*
|
|
|
|
Local_saved_data_file_length is initialized during the lock phase.
|
|
|
|
Sometimes this is not getting executed before ::repair (e.g. for
|
|
|
|
the log tables). We set it manually here.
|
|
|
|
*/
|
|
|
|
local_saved_data_file_length= share->file_stat.st_size;
|
2006-03-29 06:28:57 +04:00
|
|
|
/* set current position to the beginning of the file */
|
|
|
|
current_position= next_position= 0;
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
/* Read the file row-by-row. If everything is ok, repair is not needed. */
|
|
|
|
while (!(rc= find_current_row(buf)))
|
|
|
|
{
|
|
|
|
rows_repaired++;
|
|
|
|
current_position= next_position;
|
|
|
|
}
|
|
|
|
|
|
|
|
my_free((char*)buf, MYF(0));
|
|
|
|
|
2006-03-29 06:28:57 +04:00
|
|
|
if (rc == HA_ERR_END_OF_FILE)
|
|
|
|
{
|
2006-04-11 12:12:48 +02:00
|
|
|
/* All rows were read ok until end of file, the file does not need repair. */
|
|
|
|
|
2006-03-29 06:28:57 +04:00
|
|
|
/*
|
|
|
|
If rows_recorded != rows_repaired, we should update
|
|
|
|
rows_recorded value to the current amount of rows.
|
|
|
|
*/
|
|
|
|
share->rows_recorded= rows_repaired;
|
2006-03-13 19:36:34 +03:00
|
|
|
goto end;
|
2006-03-29 06:28:57 +04:00
|
|
|
}
|
2006-03-13 19:36:34 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
Otherwise we've encountered a bad row => repair is needed.
|
|
|
|
Let us create a temporary file.
|
|
|
|
*/
|
|
|
|
if ((repair_file= my_create(fn_format(repaired_fname, share->table_name,
|
|
|
|
"", CSN_EXT,
|
|
|
|
MY_REPLACE_EXT|MY_UNPACK_FILENAME),
|
|
|
|
0, O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
|
|
|
|
DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
|
|
|
|
|
|
|
|
if (my_write(repair_file, (byte*)share->mapped_file, current_position,
|
|
|
|
MYF(MY_NABP)))
|
|
|
|
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
|
|
|
|
my_close(repair_file, MYF(0));
|
2006-03-29 06:28:57 +04:00
|
|
|
/* we just truncated the file up to the first bad row. update rows count. */
|
2006-03-13 19:36:34 +03:00
|
|
|
share->rows_recorded= rows_repaired;
|
|
|
|
|
|
|
|
if (my_munmap(share->mapped_file, share->file_stat.st_size))
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
/* We set it to null so that get_mmap() won't try to unmap it */
|
|
|
|
share->mapped_file= NULL;
|
2006-04-11 12:12:48 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
Close the "to"-file before renaming
|
|
|
|
On Windows one cannot rename a file whose descriptor
|
|
|
|
is still open. EACCES will be returned when trying to delete
|
|
|
|
the "to"-file in my_rename()
|
|
|
|
*/
|
|
|
|
my_close(share->data_file,MYF(0));
|
|
|
|
|
|
|
|
if (my_rename(repaired_fname, share->data_file_name, MYF(0)))
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
|
|
|
/* Open the file again, it should now be repaired */
|
|
|
|
if ((share->data_file= my_open(share->data_file_name, O_RDWR|O_APPEND,
|
|
|
|
MYF(0))) == -1)
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
if (get_mmap(share, 0) > 0)
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
|
|
|
end:
|
|
|
|
share->crashed= FALSE;
|
|
|
|
DBUG_RETURN(HA_ADMIN_OK);
|
|
|
|
}
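/*
  Editor's sketch (not part of the engine): the salvage step of ::repair()
  above -- keep only the prefix of the data file up to the first bad row.
  truncate_after_last_good_row is a hypothetical name; in the engine the
  prefix is written to a .CSN file that is then renamed over the .CSV file.
*/
#ifdef TINA_DOC_EXAMPLES
#include <stddef.h>
#include <stdio.h>

/* Writes bytes [0, good_prefix_len) of src to a fresh file at dst_path. */
static bool truncate_after_last_good_row(const char *dst_path,
                                         const char *src,
                                         size_t good_prefix_len)
{
  FILE *dst= fopen(dst_path, "wb");
  if (!dst)
    return true;                                          /* error */
  bool failed= fwrite(src, 1, good_prefix_len, dst) != good_prefix_len;
  failed|= fclose(dst) != 0;
  return failed;
}
#endif /* TINA_DOC_EXAMPLES */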
|
|
|
|
|
|
|
|
/*
|
2005-11-24 02:56:12 +02:00
|
|
|
DELETE without WHERE calls this
|
2004-08-12 20:57:18 -07:00
|
|
|
*/
|
2005-11-24 02:56:12 +02:00
|
|
|
|
2004-08-12 20:57:18 -07:00
|
|
|
int ha_tina::delete_all_rows()
|
|
|
|
{
|
2006-03-29 17:04:00 +03:00
|
|
|
int rc;
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_ENTER("ha_tina::delete_all_rows");
|
|
|
|
|
2005-11-17 22:52:31 +01:00
|
|
|
if (!records_is_known)
|
2006-03-29 17:04:00 +03:00
|
|
|
DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND);
|
2005-11-17 22:52:31 +01:00
|
|
|
|
2006-04-04 09:59:19 +02:00
|
|
|
/* Unmap the file before the new size is set */
|
|
|
|
if (share->mapped_file && my_munmap(share->mapped_file,
|
|
|
|
share->file_stat.st_size))
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
share->mapped_file= NULL;
|
|
|
|
|
|
|
|
/* Truncate the file to zero size */
|
2006-03-29 17:04:00 +03:00
|
|
|
rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME));
|
2004-08-12 20:57:18 -07:00
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
if (get_mmap(share, 0) > 0)
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
2005-11-17 22:52:31 +01:00
|
|
|
records=0;
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(rc);
|
|
|
|
}
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
/*
|
2004-08-12 20:57:18 -07:00
|
|
|
Called by the database to lock the table. Keep in mind that this
|
|
|
|
is an internal lock.
|
|
|
|
*/
|
|
|
|
THR_LOCK_DATA **ha_tina::store_lock(THD *thd,
|
|
|
|
THR_LOCK_DATA **to,
|
|
|
|
enum thr_lock_type lock_type)
|
|
|
|
{
|
|
|
|
if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
|
|
|
|
lock.type=lock_type;
|
|
|
|
*to++= &lock;
|
|
|
|
return to;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Create a table. You do not want to leave the table open after a call to
|
|
|
|
this (the database will call ::open() if it needs to).
|
|
|
|
*/
|
|
|
|
|
2005-09-29 01:00:47 +04:00
|
|
|
int ha_tina::create(const char *name, TABLE *table_arg,
|
|
|
|
HA_CREATE_INFO *create_info)
|
2004-08-12 20:57:18 -07:00
|
|
|
{
|
|
|
|
char name_buff[FN_REFLEN];
|
|
|
|
File create_file;
|
|
|
|
DBUG_ENTER("ha_tina::create");
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
if ((create_file= my_create(fn_format(name_buff, name, "", CSM_EXT,
|
|
|
|
MY_REPLACE_EXT|MY_UNPACK_FILENAME), 0,
|
|
|
|
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
|
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
|
|
|
write_meta_file(create_file, 0, FALSE);
|
|
|
|
my_close(create_file, MYF(0));
|
|
|
|
|
|
|
|
if ((create_file= my_create(fn_format(name_buff, name, "", CSV_EXT,
|
2005-09-29 01:00:47 +04:00
|
|
|
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
|
2005-06-01 17:34:10 -07:00
|
|
|
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
|
2004-08-12 20:57:18 -07:00
|
|
|
DBUG_RETURN(-1);
|
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
my_close(create_file, MYF(0));
|
2004-08-12 20:57:18 -07:00
|
|
|
|
|
|
|
DBUG_RETURN(0);
|
|
|
|
}
|
2004-08-17 01:29:19 -07:00
|
|
|
|
2006-03-13 19:36:34 +03:00
|
|
|
int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
|
|
|
|
{
|
|
|
|
int rc= 0;
|
|
|
|
byte *buf;
|
|
|
|
const char *old_proc_info;
|
|
|
|
ha_rows count= share->rows_recorded;
|
|
|
|
DBUG_ENTER("ha_tina::check");
|
|
|
|
|
|
|
|
old_proc_info= thd_proc_info(thd, "Checking table");
|
|
|
|
if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
|
|
|
|
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
|
|
|
|
|
|
|
|
/*
|
|
|
|
Local_saved_data_file_length is initialized during the lock phase.
|
|
|
|
Check does not use store_lock in certain cases. So, we set it
|
|
|
|
manually here.
|
|
|
|
*/
|
|
|
|
local_saved_data_file_length= share->file_stat.st_size;
|
|
|
|
/* set current position to the beginning of the file */
|
|
|
|
current_position= next_position= 0;
|
|
|
|
/* Read the file row-by-row. If everything is ok, repair is not needed. */
|
|
|
|
while (!(rc= find_current_row(buf)))
|
|
|
|
{
|
|
|
|
count--;
|
|
|
|
current_position= next_position;
|
|
|
|
}
|
|
|
|
|
|
|
|
my_free((char*)buf, MYF(0));
|
|
|
|
thd_proc_info(thd, old_proc_info);
|
|
|
|
|
|
|
|
if ((rc != HA_ERR_END_OF_FILE) || count)
|
|
|
|
{
|
2006-03-29 06:28:57 +04:00
|
|
|
share->crashed= TRUE;
|
2006-03-13 19:36:34 +03:00
|
|
|
DBUG_RETURN(HA_ADMIN_CORRUPT);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
DBUG_RETURN(HA_ADMIN_OK);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-02-07 22:42:57 -08:00
|
|
|
bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info,
|
|
|
|
uint table_changes)
|
|
|
|
{
|
|
|
|
return COMPATIBLE_DATA_YES;
|
|
|
|
}
|
|
|
|
|
2006-04-13 13:49:29 -07:00
|
|
|
|
|
|
|
mysql_declare_plugin(csv)
|
2005-12-21 10:18:40 -08:00
|
|
|
{
|
|
|
|
MYSQL_STORAGE_ENGINE_PLUGIN,
|
|
|
|
&tina_hton,
|
2006-05-02 04:11:00 -07:00
|
|
|
tina_hton_name,
|
2005-12-21 10:18:40 -08:00
|
|
|
"Brian Aker, MySQL AB",
|
2006-05-02 04:11:00 -07:00
|
|
|
tina_hton_comment,
|
2005-12-21 10:18:40 -08:00
|
|
|
tina_init_func, /* Plugin Init */
|
2005-12-21 12:50:50 -08:00
|
|
|
tina_done_func, /* Plugin Deinit */
|
|
|
|
0x0100 /* 1.0 */,
|
2005-12-21 10:18:40 -08:00
|
|
|
}
|
|
|
|
mysql_declare_plugin_end;
|
2006-04-13 13:49:29 -07:00
|
|
|
|