mariadb/sql/sql_delete.cc
unknown f631b361b6 Table definition cache, part 2
The table opening process now works the following way:
- Create common TABLE_SHARE object
- Read the .frm file and unpack it into the TABLE_SHARE object
- Create a TABLE object based on the information in the TABLE_SHARE
  object and open a handler to the table object
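A minimal, self-contained sketch of that split (the types below are simplified
stand-ins, not the server's real TABLE_SHARE/TABLE structures; the cache key
mimics create_table_def_key()):

  #include <map>
  #include <string>

  struct TableShare                      /* parsed once from the .frm file */
  {
    std::string db, table_name;          /* the real share also holds fields, keys, ... */
    int ref_count;                       /* TABLE instances currently using it */
  };

  struct Table                           /* one per concurrent use of the table */
  {
    TableShare *s;                       /* points at the shared definition */
    /* handler, record buffers and other per-use state live here in the server */
  };

  static std::map<std::string, TableShare> table_def_cache;

  /* Steps 1+2: look up the cached definition, or "read the .frm" once and cache it */
  static TableShare *get_share(const std::string &db, const std::string &name)
  {
    std::string key= db + '\0' + name;   /* analogue of create_table_def_key() */
    std::map<std::string, TableShare>::iterator it= table_def_cache.find(key);
    if (it == table_def_cache.end())
    {
      TableShare share;
      share.db= db;
      share.table_name= name;
      share.ref_count= 0;
      it= table_def_cache.insert(std::make_pair(key, share)).first;
    }
    it->second.ref_count++;
    return &it->second;
  }

  /* Step 3: build a per-use TABLE from the share (the server opens a handler here) */
  static Table open_table(const std::string &db, const std::string &name)
  {
    Table t;
    t.s= get_share(db, name);
    return t;
  }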

Other noteworthy changes:
- In TABLE_SHARE the most common strings are now LEX_STRING's
- Better error message when table is not found
- Variable table_cache is now renamed 'table_open_cache'
- New variable 'table_definition_cache' that is the number of table definitions that will be cached
- strxnmov() calls are now fixed to avoid overflows
- strxnmov() will now always add a terminating \0 to the result
- engine objects are now created with a TABLE_SHARE object instead of a TABLE object.
- After creating a field object one must call field->init(table) before using it (see the sketch below)
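
A tiny illustrative sketch of this two-phase pattern (the classes are stand-ins,
not the server's Field/TABLE):

  struct Table;                          /* the per-use object a field binds to */

  class Field
  {
  public:
    Field() : table(0) {}
    /* Must be called once the concrete TABLE is known, before the field
       is used to read or write any row. */
    void init(Table *tbl) { table= tbl; }
  private:
    Table *table;
  };

  /* Typical usage after this change: construct first, then bind to a TABLE. */
  Field *make_field_for(Table *tbl)
  {
    Field *f= new Field();               /* built from cached definition data */
    f->init(tbl);                        /* required before first use */
    return f;
  }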

- For a busy system this change will give you:
 - Less memory usage per table object
 - Faster opening of tables (if the table has been in use or is in the table definition cache)
 - The ability to cache many table definition objects
 - Faster dropping of tables


mysql-test/mysql-test-run.sh:
  Fixed some problems with --gdb option
  Test, both via the socket and via the TCP/IP port, that all old servers are killed
mysql-test/r/flush_table.result:
  More tests with lock table with 2 threads + flush table
mysql-test/r/information_schema.result:
  Removed old (now wrong) result
mysql-test/r/innodb.result:
  Better error messages (thanks to TDC patch)
mysql-test/r/merge.result:
  Extra flush table test
mysql-test/r/ndb_bitfield.result:
  Better error messages (thanks to TDC patch)
mysql-test/r/ndb_partition_error.result:
  Better error messages (thanks to TDC patch)
mysql-test/r/query_cache.result:
  Remove tables left from old tests
mysql-test/r/temp_table.result:
  Test truncate with temporary tables
mysql-test/r/variables.result:
  Table_cache -> Table_open_cache
mysql-test/t/flush_table.test:
  More tests with lock table with 2 threads + flush table
mysql-test/t/merge.test:
  Extra flush table test
mysql-test/t/multi_update.test:
  Added 'sleep' to make test predictable
mysql-test/t/query_cache.test:
  Remove tables left from old tests
mysql-test/t/temp_table.test:
  Test truncate with temporary tables
mysql-test/t/variables.test:
  Table_cache -> Table_open_cache
mysql-test/valgrind.supp:
  Removed a warning that may happen because threads die in a different order
mysys/hash.c:
  Fixed wrong DBUG_PRINT
mysys/mf_dirname.c:
  More DBUG
mysys/mf_pack.c:
  Better comment
mysys/mf_tempdir.c:
  More DBUG
  Ensure that we call cleanup_dirname() on all temporary directory paths.
  
  If we don't do this, we will get a failure when comparing temporary table
  names, as in some cases the temporary table name is run through convert_dirname()
mysys/my_alloc.c:
  Indentation fix
sql/examples/ha_example.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_example.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/examples/ha_tina.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/field.cc:
  Update for table definition cache:
  - Field creation now takes TABLE_SHARE instead of TABLE as argument
    (This is because field definitions are now cached in TABLE_SHARE)
    When a field is created, one now must call field->init(TABLE) before using it
  - Use s->db instead of s->table_cache_key
  - Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
  - make_field() takes TABLE_SHARE as argument instead of TABLE
  - move_field() -> move_field_offset()
sql/field.h:
  Update for table definition cache:
  - Field creation now takes TABLE_SHARE instead of TABLE as argument
    (This is because field definitions are now cached in TABLE_SHARE)
    When a field is created, one now must call field->init(TABLE) before using it
  - Added Field::clone() to create a field in TABLE from a field in TABLE_SHARE
  - make_field() takes TABLE_SHARE as argument instead of TABLE
  - move_field() -> move_field_offset()
sql/ha_archive.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_archive.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_berkeley.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Changed the name of an argument to create() so it doesn't hide the internal 'table' variable.
  table->s  -> table_share
sql/ha_berkeley.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_blackhole.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_federated.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Fixed comments
  Remove index variable and replace with pointers (simple optimization)
  move_field() -> move_field_offset()
  Removed some strlen() calls
sql/ha_federated.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_heap.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Simplify delete_table() and create() as the given file names are now without extension
sql/ha_heap.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_innodb.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisam.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Removed an unneeded fn_format() call
  Fixed for new table->s structure
sql/ha_myisam.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_myisammrg.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Don't set 'is_view' for MERGE tables
  Use new interface to find_temporary_table()
sql/ha_myisammrg.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Added flag HA_NO_COPY_ON_ALTER
sql/ha_ndbcluster.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Fixed wrong calls to strxnmov()
  Give error HA_ERR_TABLE_DEF_CHANGED if table definition has changed
  drop_table -> intern_drop_table()
  table->s -> table_share
  Move part_info to TABLE
  Fixed comments & DBUG prints
  New arguments to print_error()
sql/ha_ndbcluster.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
sql/ha_partition.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  We can't set up or use part_info when creating the handler, as there is not yet any table object
  New ha_intialise() to work with TDC (Done by Mikael)
sql/ha_partition.h:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  Got set_part_info() from Mikael
sql/handler.cc:
  We now use TABLE_SHARE instead of TABLE when creating engine handlers
  ha_delete_table() now also takes database as an argument
  handler::ha_open() now takes TABLE as argument
  ha_open() now calls ha_allocate_read_write_set()
  Simplify ha_allocate_read_write_set()
  Remove ha_deallocate_read_write_set()
  Use table_share (Cached by table definition cache)
sql/handler.h:
  New table flag: HA_NO_COPY_ON_ALTER (used by merge tables)
  Remove ha_deallocate_read_write_set()
  get_new_handler() now takes TABLE_SHARE as argument (see the sketch below)
  ha_delete_table() now gets database as argument
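  A self-contained analogue of this convention (illustrative classes, not the
  server's handler hierarchy): the handler is constructed from the shared
  definition and is only bound to a concrete TABLE when it is opened, matching
  the handler::ha_open(TABLE) step mentioned above.

    struct TableShare { /* parsed table definition */ };
    struct Table      { TableShare *s; };

    class Handler
    {
    public:
      explicit Handler(TableShare *share_arg) : share(share_arg), table(0) {}
      int ha_open(Table *tbl) { table= tbl; return 0; }  /* bind at open time */
    protected:
      TableShare *share;     /* usable even before any TABLE object exists */
      Table *table;
    };

    /* Factory in the spirit of get_new_handler(): needs only the share. */
    static Handler *new_handler(TableShare *share) { return new Handler(share); }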
sql/item.cc:
  table_name and db are now LEX_STRING objects
  When creating fields, we now have to call field->init(table)
  move_field -> move_field_offset()
sql/item.h:
  tmp_table_field_from_field_type() now takes an extra parameter 'fixed_length' to allow one to force usage of CHAR
   instead of BLOB
sql/item_cmpfunc.cc:
  Fixed call to tmp_table_field_from_field_type()
sql/item_create.cc:
  Assert if a new cast type is not handled
sql/item_func.cc:
  When creating fields, we now have to call field->init(table)
  dummy_table used by 'sp' now needs a TABLE_SHARE object
sql/item_subselect.cc:
  Trivial code cleanups
sql/item_sum.cc:
  When creating fields, we now have to call field->init(table)
sql/item_timefunc.cc:
  Item_func_str_to_date::tmp_table_field() is now replaced by a call to
   tmp_table_field_from_field_type() (see item_timefunc.h)
sql/item_timefunc.h:
  Simplify tmp_table_field()
sql/item_uniq.cc:
  When creating fields, we now have to call field->init(table)
sql/key.cc:
  Added 'KEY' argument to 'find_ref_key' to simplify code
sql/lock.cc:
  More debugging
  Use create_table_def_key() to create key for table cache
  Allocate TABLE_SHARE properly when creating name lock
  Fix that locked_table_name doesn't test same table twice
sql/mysql_priv.h:
  New functions for table definition cache
  New interfaces to a lot of functions.
  New faster interface to find_temporary_table() and close_temporary_table()
sql/mysqld.cc:
  Added support for table definition cache of size 'table_def_size'
  Fixed some calls to strnmov()
  Changed name of 'table_cache' to 'table_open_cache'
sql/opt_range.cc:
  Use new interfaces
  Fixed warnings from valgrind
sql/parse_file.cc:
  Safer calls to strxnmov()
  Fixed typo
sql/set_var.cc:
  Added variable 'table_definition_cache'
  Variable table_cache renamed to 'table_open_cache'
sql/slave.cc:
  Use new interface
sql/sp.cc:
  Proper use of TABLE_SHARE
sql/sp_head.cc:
  Remove compiler warnings
  We now have to call field->init(table)
sql/sp_head.h:
  Pointers to parsed strings are now const
sql/sql_acl.cc:
  table_name is now a LEX_STRING
sql/sql_base.cc:
  Main implementation of the table definition cache
  (The #ifdef's are there for the future, when the table definition cache will replace the open table cache)
  Table definitions are now cached independently of open tables, which speeds things up when a table is in use from several places at once
  Views are not yet cached; for the moment we only cache whether a table is a view or not.

  Faster implementation of find_temporary_table()
  Replace 'wait_for_refresh()' with the more general function 'wait_for_condition()' (the general pattern is sketched below)
  Drop table is slightly faster as we can use the table definition cache to know the type of the table
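  The general pattern behind a wait_for_condition()-style helper, as a
  self-contained sketch (the Waiter struct and names are illustrative; the
  server waits on LOCK_open with its own predicates):

    #include <pthread.h>

    struct Waiter
    {
      pthread_mutex_t mutex;
      pthread_cond_t  cond;
      bool            ready;             /* the shared state being waited on */
    };

    /* Caller holds w->mutex on entry; pthread_cond_wait() releases it while
       sleeping and re-acquires it before returning. */
    static void wait_for_ready(Waiter *w)
    {
      while (!w->ready)                  /* re-check: wakeups may be spurious */
        pthread_cond_wait(&w->cond, &w->mutex);
    }

    static void signal_ready(Waiter *w)
    {
      pthread_mutex_lock(&w->mutex);
      w->ready= true;
      pthread_cond_broadcast(&w->cond);
      pthread_mutex_unlock(&w->mutex);
    }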
sql/sql_cache.cc:
  table_cache_key and table_name are now LEX_STRING
  DBUG print fixes
sql/sql_class.cc:
  table_cache_key is now a LEX_STRING
  safer strxnmov()
sql/sql_class.h:
  Added number of open table shares (table definitions)
sql/sql_db.cc:
  safer strxnmov()
sql/sql_delete.cc:
  Use new interface to find_temporary_table()
sql/sql_derived.cc:
  table_name is now a LEX_STRING
sql/sql_handler.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_insert.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
sql/sql_lex.cc:
  Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_lex.h:
  Make parsed string a const (to quickly find out if anything is trying to change the query string)
sql/sql_load.cc:
  Safer strxnmov()
sql/sql_parse.cc:
  Better error if wrong DB name
sql/sql_partition.cc:
  part_info moved to TABLE from TABLE_SHARE
  Indentation changes
sql/sql_select.cc:
  Indentation fixes
  Call field->init(TABLE) for newly created fields
  Update create_tmp_table() to use TABLE_SHARE properly
sql/sql_select.h:
  Call field->init(TABLE) for newly created fields
sql/sql_show.cc:
  table_name is now a LEX_STRING
  part_info moved to TABLE
sql/sql_table.cc:
  Use table definition cache to speed up delete of tables
  Fixed calls to functions with new interfaces
  Don't use 'share_not_to_be_used'
  Instead of doing openfrm() when doing repair, we now have to call
  get_table_share() followed by open_table_from_share().
  Replace some fn_format() with faster unpack_filename().
  Safer strxnmov()
  part_info is now in TABLE
  Added Mikael's patch for partitioning and ALTER TABLE
  Instead of using 'TABLE_SHARE->is_view', use 'table_flags() & HA_NO_COPY_ON_ALTER' (see the snippet below)
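  A hedged illustration of that check (table_flags() and the new flag come from
  this change; the surrounding context is made up):

    if (table->file->table_flags() & HA_NO_COPY_ON_ALTER)
    {
      /* e.g. MERGE tables: ALTER TABLE must not copy rows through this handler */
    }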
sql/sql_test.cc:
  table_name and table_cache_key are now LEX_STRING's
sql/sql_trigger.cc:
  TABLE_SHARE->db and TABLE_SHARE->table_name are now LEX_STRING's
  safer strxnmov()
  Removed compiler warnings
sql/sql_update.cc:
  Call field->init(TABLE) after the field is created
sql/sql_view.cc:
  safer strxnmov()
  Create common TABLE_SHARE object for views to allow us to cache if table is a view
sql/structs.h:
  Added SHOW_TABLE_DEFINITIONS
sql/table.cc:
  Creation and destruction of TABLE_SHARE objects that are common for many TABLE objects
  
  The table opening process now works the following way:
  - Create common TABLE_SHARE object
  - Read the .frm file and unpack it into the TABLE_SHARE object
  - Create a TABLE object based on the information in the TABLE_SHARE
    object and open a handler to the table object
  
  open_table_def() is written in such a way that it should be trivial to add parsing of .frm files in new formats
sql/table.h:
  TABLE objects for the same database table now share a common TABLE_SHARE object
  In TABLE_SHARE the most common strings are now LEX_STRING's
sql/unireg.cc:
  Changed the arguments to rea_create_table() to have the same order as other functions
  Call field->init(table) for newly created fields
sql/unireg.h:
  Added OPEN_VIEW
strings/strxnmov.c:
  Changed strxnmov() to always add a terminating \0
  This makes usage of strxnmov() safer, as most of the MySQL code assumes that strxnmov() creates a null-terminated string (see the example below)
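  A typical call after this change (the path buffer and the db/table_name
  variables are illustrative; FN_REFLEN, reg_ext, NullS and mysql_data_home are
  existing MySQL definitions).  Passing sizeof(path) - 1 leaves room for the
  terminating \0 that strxnmov() now always writes:

    char path[FN_REFLEN];
    strxnmov(path, sizeof(path) - 1, mysql_data_home, "/", db, "/",
             table_name, reg_ext, NullS);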
2005-11-23 22:45:02 +02:00


/* Copyright (C) 2000 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
Delete of records and truncate of tables.
Multi-table deletes were introduced by Monty and Sinisa
*/
#include "mysql_priv.h"
#include "ha_innodb.h"
#include "sql_select.h"
#include "sp_head.h"
#include "sql_trigger.h"
bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SQL_LIST *order, ha_rows limit, ulonglong options,
bool reset_auto_increment)
{
bool will_batch;
int error, loc_error;
TABLE *table;
SQL_SELECT *select=0;
READ_RECORD info;
bool using_limit=limit != HA_POS_ERROR;
bool transactional_table, safe_update, const_cond;
ha_rows deleted;
uint usable_index= MAX_KEY;
SELECT_LEX *select_lex= &thd->lex->select_lex;
DBUG_ENTER("mysql_delete");
if (open_and_lock_tables(thd, table_list))
DBUG_RETURN(TRUE);
if (!(table= table_list->table))
{
my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0),
table_list->view_db.str, table_list->view_name.str);
DBUG_RETURN(TRUE);
}
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
thd->proc_info="init";
table->map=1;
if (mysql_prepare_delete(thd, table_list, &conds))
DBUG_RETURN(TRUE);
const_cond= (!conds || conds->const_item());
safe_update=test(thd->options & OPTION_SAFE_UPDATES);
if (safe_update && const_cond)
{
my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
DBUG_RETURN(TRUE);
}
select_lex->no_error= thd->lex->ignore;
/*
Test if the user wants to delete all rows and deletion doesn't have
any side-effects (because of triggers), so we can use optimized
handler::delete_all_rows() method.
*/
if (!using_limit && const_cond && (!conds || conds->val_int()) &&
!(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) &&
!(table->triggers && table->triggers->has_delete_triggers()))
{
deleted= table->file->records;
if (!(error=table->file->delete_all_rows()))
{
error= -1; // ok
goto cleanup;
}
if (error != HA_ERR_WRONG_COMMAND)
{
table->file->print_error(error,MYF(0));
error=0;
goto cleanup;
}
/* Handler didn't support fast delete; Delete rows one by one */
}
table->used_keys.clear_all();
table->quick_keys.clear_all(); // Can't use 'only index'
select=make_select(table, 0, 0, conds, 0, &error);
if (error)
DBUG_RETURN(TRUE);
if ((select && select->check_quick(thd, safe_update, limit)) || !limit)
{
delete select;
free_underlaid_joins(thd, select_lex);
thd->row_count_func= 0;
send_ok(thd,0L);
/*
We don't need to call reset_auto_increment in this case, because
mysql_truncate always gives a NULL conds argument, hence we never
get here.
*/
DBUG_RETURN(0); // Nothing to delete
}
/* If running in safe sql mode, don't allow updates without keys */
if (table->quick_keys.is_clear_all())
{
thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
if (safe_update && !using_limit)
{
delete select;
free_underlaid_joins(thd, select_lex);
my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
DBUG_RETURN(TRUE);
}
}
if (options & OPTION_QUICK)
(void) table->file->extra(HA_EXTRA_QUICK);
if (order && order->elements)
{
uint length;
SORT_FIELD *sortorder;
TABLE_LIST tables;
List<Item> fields;
List<Item> all_fields;
ha_rows examined_rows;
bzero((char*) &tables,sizeof(tables));
tables.table = table;
tables.alias = table_list->alias;
if (select_lex->setup_ref_array(thd, order->elements) ||
setup_order(thd, select_lex->ref_pointer_array, &tables,
fields, all_fields, (ORDER*) order->first))
{
delete select;
free_underlaid_joins(thd, &thd->lex->select_lex);
DBUG_RETURN(TRUE);
}
if (!select && limit != HA_POS_ERROR)
usable_index= get_index_for_order(table, (ORDER*)(order->first), limit);
if (usable_index == MAX_KEY)
{
table->sort.io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
MYF(MY_FAE | MY_ZEROFILL));
if (!(sortorder= make_unireg_sortorder((ORDER*) order->first,
&length)) ||
(table->sort.found_records = filesort(thd, table, sortorder, length,
select, HA_POS_ERROR,
&examined_rows))
== HA_POS_ERROR)
{
delete select;
free_underlaid_joins(thd, &thd->lex->select_lex);
DBUG_RETURN(TRUE);
}
/*
Filesort has already found and selected the rows we want to delete,
so we don't need the where clause
*/
delete select;
free_underlaid_joins(thd, select_lex);
select= 0;
}
}
/* If quick select is used, initialize it before retrieving rows. */
if (select && select->quick && select->quick->reset())
{
delete select;
free_underlaid_joins(thd, select_lex);
DBUG_RETURN(TRUE);
}
if (usable_index==MAX_KEY)
init_read_record(&info,thd,table,select,1,1);
else
init_read_record_idx(&info, thd, table, 1, usable_index);
deleted=0L;
init_ftfuncs(thd, select_lex, 1);
thd->proc_info="updating";
will_batch= !table->file->start_bulk_delete();
while (!(error=info.read_record(&info)) && !thd->killed &&
!thd->net.report_error)
{
// thd->net.report_error is tested to disallow delete row on error
if (!(select && select->skip_record())&& !thd->net.report_error )
{
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_BEFORE, FALSE))
{
error= 1;
break;
}
if (!(error= table->file->delete_row(table->record[0])))
{
deleted++;
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_AFTER, FALSE))
{
error= 1;
break;
}
if (!--limit && using_limit)
{
error= -1;
break;
}
}
else
{
table->file->print_error(error,MYF(0));
/*
In < 4.0.14 we set the error number to 0 here, but that
was not sensible, because then MySQL would not roll back the
failed DELETE, and also wrote it to the binlog. For MyISAM
tables a DELETE probably never should fail (?), but for
InnoDB it can fail in a FOREIGN KEY error or an
out-of-tablespace error.
*/
error= 1;
break;
}
}
else
table->file->unlock_row(); // Row failed selection, release lock on it
}
if (thd->killed && !error)
error= 1; // Aborted
if (will_batch && (loc_error= table->file->end_bulk_delete()))
{
if (error != 1)
table->file->print_error(loc_error,MYF(0));
error=1;
}
thd->proc_info= "end";
end_read_record(&info);
free_io_cache(table); // Will not do any harm
if (options & OPTION_QUICK)
(void) table->file->extra(HA_EXTRA_NORMAL);
if (reset_auto_increment && (error < 0))
{
/*
We're really doing a truncate and need to reset the table's
auto-increment counter.
*/
int error2= table->file->reset_auto_increment(0);
if (error2 && (error2 != HA_ERR_WRONG_COMMAND))
{
table->file->print_error(error2, MYF(0));
error= 1;
}
}
cleanup:
/*
Invalidate the table in the query cache if something changed. This must
be before binlog writing and ha_autocommit_...
*/
if (deleted)
{
query_cache_invalidate3(thd, table_list, 1);
}
delete select;
transactional_table= table->file->has_transactions();
/* See similar binlogging code in sql_update.cc, for comments */
if ((error < 0) || (deleted && !transactional_table))
{
if (mysql_bin_log.is_open())
{
if (error < 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
transactional_table, FALSE);
if (mysql_bin_log.write(&qinfo) && transactional_table)
error=1;
}
if (!transactional_table)
thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
}
if (transactional_table)
{
if (ha_autocommit_or_rollback(thd,error >= 0))
error=1;
}
if (thd->lock)
{
mysql_unlock_tables(thd, thd->lock);
thd->lock=0;
}
free_underlaid_joins(thd, select_lex);
if (error < 0)
{
thd->row_count_func= deleted;
send_ok(thd,deleted);
DBUG_PRINT("info",("%d records deleted",deleted));
}
DBUG_RETURN(error >= 0 || thd->net.report_error);
}
/*
Prepare items in DELETE statement
SYNOPSIS
mysql_prepare_delete()
thd - thread handler
table_list - global/local table list
conds - conditions
RETURN VALUE
FALSE OK
TRUE error
*/
bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds)
{
SELECT_LEX *select_lex= &thd->lex->select_lex;
DBUG_ENTER("mysql_prepare_delete");
thd->allow_sum_func= 0;
if (setup_tables(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
table_list, conds, &select_lex->leaf_tables,
FALSE) ||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
setup_ftfuncs(select_lex))
DBUG_RETURN(TRUE);
if (!table_list->updatable || check_key_in_view(thd, table_list))
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE");
DBUG_RETURN(TRUE);
}
{
TABLE_LIST *duplicate;
if ((duplicate= unique_table(table_list, table_list->next_global)))
{
update_non_unique_table_error(table_list, "DELETE", duplicate);
DBUG_RETURN(TRUE);
}
}
select_lex->fix_prepare_information(thd, conds);
DBUG_RETURN(FALSE);
}
/***************************************************************************
Delete multiple tables from join
***************************************************************************/
#define MEM_STRIP_BUF_SIZE current_thd->variables.sortbuff_size
extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b)
{
handler *file= (handler*)arg;
return file->cmp_ref((const byte*)a, (const byte*)b);
}
/*
make delete specific preparation and checks after opening tables
SYNOPSIS
mysql_multi_delete_prepare()
thd thread handler
RETURN
FALSE OK
TRUE Error
*/
bool mysql_multi_delete_prepare(THD *thd)
{
LEX *lex= thd->lex;
TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxilliary_table_list.first;
TABLE_LIST *target_tbl;
DBUG_ENTER("mysql_multi_delete_prepare");
/*
setup_tables() need for VIEWs. JOIN::prepare() will not do it second
time.
lex->query_tables also point on local list of DELETE SELECT_LEX
*/
if (setup_tables(thd, &thd->lex->select_lex.context,
&thd->lex->select_lex.top_join_list,
lex->query_tables, &lex->select_lex.where,
&lex->select_lex.leaf_tables, FALSE))
DBUG_RETURN(TRUE);
/*
Multi-delete can't be constructed over-union => we always have
single SELECT on top and have to check underlying SELECTs of it
*/
lex->select_lex.exclude_from_table_unique_test= TRUE;
/* Fix tables-to-be-deleted-from list to point at opened tables */
for (target_tbl= (TABLE_LIST*) aux_tables;
target_tbl;
target_tbl= target_tbl->next_local)
{
if (!(target_tbl->table= target_tbl->correspondent_table->table))
{
DBUG_ASSERT(target_tbl->correspondent_table->view &&
target_tbl->correspondent_table->merge_underlying_list &&
target_tbl->correspondent_table->merge_underlying_list->
next_local);
my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0),
target_tbl->correspondent_table->view_db.str,
target_tbl->correspondent_table->view_name.str);
DBUG_RETURN(TRUE);
}
if (!target_tbl->correspondent_table->updatable ||
check_key_in_view(thd, target_tbl->correspondent_table))
{
my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
target_tbl->table_name, "DELETE");
DBUG_RETURN(TRUE);
}
/*
Check that table from which we delete is not used somewhere
inside subqueries/view.
*/
{
TABLE_LIST *duplicate;
if ((duplicate= unique_table(target_tbl->correspondent_table,
lex->query_tables)))
{
update_non_unique_table_error(target_tbl->correspondent_table,
"DELETE", duplicate);
DBUG_RETURN(TRUE);
}
}
}
DBUG_RETURN(FALSE);
}
multi_delete::multi_delete(TABLE_LIST *dt, uint num_of_tables_arg)
: delete_tables(dt), deleted(0), found(0),
num_of_tables(num_of_tables_arg), error(0),
do_delete(0), transactional_tables(0), normal_tables(0)
{
tempfiles= (Unique **) sql_calloc(sizeof(Unique *) * num_of_tables);
}
int
multi_delete::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
{
DBUG_ENTER("multi_delete::prepare");
unit= u;
do_delete= 1;
thd->proc_info="deleting from main table";
DBUG_RETURN(0);
}
bool
multi_delete::initialize_tables(JOIN *join)
{
TABLE_LIST *walk;
Unique **tempfiles_ptr;
DBUG_ENTER("initialize_tables");
if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
DBUG_RETURN(1);
table_map tables_to_delete_from=0;
for (walk= delete_tables; walk; walk= walk->next_local)
tables_to_delete_from|= walk->table->map;
walk= delete_tables;
delete_while_scanning= 1;
for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
tab < end;
tab++)
{
if (tab->table->map & tables_to_delete_from)
{
/* We are going to delete from this table */
TABLE *tbl=walk->table=tab->table;
walk= walk->next_local;
/* Don't use KEYREAD optimization on this table */
tbl->no_keyread=1;
/* Don't use record cache */
tbl->no_cache= 1;
tbl->used_keys.clear_all();
if (tbl->file->has_transactions())
transactional_tables= 1;
else
normal_tables= 1;
}
else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
walk == delete_tables)
{
/*
We are not deleting from the table we are scanning. In this
case send_data() shouldn't delete any rows, as we may touch
the rows in the deleted table many times
*/
delete_while_scanning= 0;
}
}
walk= delete_tables;
tempfiles_ptr= tempfiles;
if (delete_while_scanning)
{
table_being_deleted= delete_tables;
walk= walk->next_local;
}
for (;walk ;walk= walk->next_local)
{
TABLE *table=walk->table;
*tempfiles_ptr++= new Unique (refpos_order_cmp,
(void *) table->file,
table->file->ref_length,
MEM_STRIP_BUF_SIZE);
}
init_ftfuncs(thd, thd->lex->current_select, 1);
DBUG_RETURN(thd->is_fatal_error != 0);
}
multi_delete::~multi_delete()
{
for (table_being_deleted= delete_tables;
table_being_deleted;
table_being_deleted= table_being_deleted->next_local)
{
TABLE *table= table_being_deleted->table;
free_io_cache(table); // Alloced by unique
table->no_keyread=0;
}
for (uint counter= 0; counter < num_of_tables; counter++)
{
if (tempfiles[counter])
delete tempfiles[counter];
}
}
bool multi_delete::send_data(List<Item> &values)
{
int secure_counter= delete_while_scanning ? -1 : 0;
TABLE_LIST *del_table;
DBUG_ENTER("multi_delete::send_data");
for (del_table= delete_tables;
del_table;
del_table= del_table->next_local, secure_counter++)
{
TABLE *table= del_table->table;
/* Check if we are using outer join and we didn't find the row */
if (table->status & (STATUS_NULL_ROW | STATUS_DELETED))
continue;
table->file->position(table->record[0]);
found++;
if (secure_counter < 0)
{
/* We are scanning the current table */
DBUG_ASSERT(del_table == table_being_deleted);
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_BEFORE, FALSE))
DBUG_RETURN(1);
table->status|= STATUS_DELETED;
if (!(error=table->file->delete_row(table->record[0])))
{
deleted++;
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_AFTER, FALSE))
DBUG_RETURN(1);
}
else
{
table->file->print_error(error,MYF(0));
DBUG_RETURN(1);
}
}
else
{
error=tempfiles[secure_counter]->unique_add((char*) table->file->ref);
if (error)
{
error= 1; // Fatal error
DBUG_RETURN(1);
}
}
}
DBUG_RETURN(0);
}
void multi_delete::send_error(uint errcode,const char *err)
{
DBUG_ENTER("multi_delete::send_error");
/* First send the error, whatever it is ... */
my_message(errcode, err, MYF(0));
/* If nothing deleted return */
if (!deleted)
DBUG_VOID_RETURN;
/* Something already deleted so we have to invalidate cache */
query_cache_invalidate3(thd, delete_tables, 1);
/*
If rows have been deleted from the first table only and it is
transactional, just do a rollback.
The same if all tables are transactional, regardless of where we are.
In all other cases do attempt deletes ...
*/
if ((table_being_deleted == delete_tables &&
table_being_deleted->table->file->has_transactions()) ||
!normal_tables)
ha_rollback_stmt(thd);
else if (do_delete)
{
/*
We have to execute the recorded do_deletes() and write info into the
error log
*/
error= 1;
send_eof();
}
DBUG_VOID_RETURN;
}
/*
Do delete from other tables.
Returns values:
0 ok
1 error
*/
int multi_delete::do_deletes()
{
int local_error= 0, counter= 0, error;
bool will_batch;
DBUG_ENTER("do_deletes");
DBUG_ASSERT(do_delete);
do_delete= 0; // Mark called
if (!found)
DBUG_RETURN(0);
table_being_deleted= (delete_while_scanning ? delete_tables->next_local :
delete_tables);
for (; table_being_deleted;
table_being_deleted= table_being_deleted->next_local, counter++)
{
TABLE *table = table_being_deleted->table;
if (tempfiles[counter]->get(table))
{
local_error=1;
break;
}
READ_RECORD info;
init_read_record(&info,thd,table,NULL,0,1);
/*
Ignore any rows not found in reference tables as they may already have
been deleted by foreign key handling
*/
info.ignore_not_found_rows= 1;
will_batch= !table->file->start_bulk_delete();
while (!(local_error=info.read_record(&info)) && !thd->killed)
{
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_BEFORE, FALSE))
{
local_error= 1;
break;
}
if ((local_error=table->file->delete_row(table->record[0])))
{
table->file->print_error(local_error,MYF(0));
break;
}
deleted++;
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_AFTER, FALSE))
{
local_error= 1;
break;
}
}
if (will_batch && (error= table->file->end_bulk_delete()))
{
if (!local_error)
{
local_error= error;
table->file->print_error(local_error,MYF(0));
}
}
end_read_record(&info);
if (thd->killed && !local_error)
local_error= 1;
if (local_error == -1) // End of file
local_error = 0;
}
DBUG_RETURN(local_error);
}
/*
Send ok to the client
return: 0 success
1 error
*/
bool multi_delete::send_eof()
{
thd->proc_info="deleting from reference tables";
/* Does deletes for the last n - 1 tables, returns 0 if ok */
int local_error= do_deletes(); // returns 0 if success
/* compute a total error to know if something failed */
local_error= local_error || error;
/* reset used flags */
thd->proc_info="end";
/*
We must invalidate the query cache before binlog writing and
ha_autocommit_...
*/
if (deleted)
{
query_cache_invalidate3(thd, delete_tables, 1);
}
if ((local_error == 0) || (deleted && normal_tables))
{
if (mysql_bin_log.is_open())
{
if (local_error == 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
transactional_tables, FALSE);
if (mysql_bin_log.write(&qinfo) && !normal_tables)
local_error=1; // Log write failed: roll back the SQL statement
}
if (!transactional_tables)
thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
}
/* Commit or rollback the current SQL statement */
if (transactional_tables)
if (ha_autocommit_or_rollback(thd,local_error > 0))
local_error=1;
if (!local_error)
{
thd->row_count_func= deleted;
::send_ok(thd, deleted);
}
return 0;
}
/***************************************************************************
TRUNCATE TABLE
****************************************************************************/
/*
Optimize delete of all rows by doing a full generate of the table
This will work even if the .ISM and .ISD tables are destroyed
dont_send_ok should be set if:
- We always want to regenerate the table (even if the table type
normally can't safely do this).
- We don't want an ok to be sent to the end user.
- We don't want to log the truncate command
- If we want to have a name lock on the table on exit without errors.
*/
bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
{
HA_CREATE_INFO create_info;
char path[FN_REFLEN];
TABLE *table;
bool error;
DBUG_ENTER("mysql_truncate");
bzero((char*) &create_info,sizeof(create_info));
/* If it is a temporary table, close and regenerate it */
if (!dont_send_ok && (table= find_temporary_table(thd, table_list)))
{
db_type table_type= table->s->db_type;
TABLE_SHARE *share= table->s;
if (!ha_check_storage_engine_flag(table_type, HTON_CAN_RECREATE))
goto trunc_by_del;
table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK);
close_temporary_table(thd, table, 0, 0); // Don't free share
ha_create_table(thd, share->normalized_path.str,
share->db.str, share->table_name.str, &create_info, 1);
// We don't need to call invalidate() because this table is not in cache
if ((error= (int) !(open_temporary_table(thd, share->path.str,
share->db.str,
share->table_name.str, 1))))
(void) rm_temporary_table(table_type, path);
free_table_share(share);
my_free((char*) table,MYF(0));
/*
If we return here we will not have logged the truncation to the bin log
and we will not send_ok() to the client.
*/
goto end;
}
(void) sprintf(path,"%s/%s/%s%s",mysql_data_home,table_list->db,
table_list->table_name,reg_ext);
fn_format(path, path, "", "", MY_UNPACK_FILENAME);
if (!dont_send_ok)
{
db_type table_type;
mysql_frm_type(thd, path, &table_type);
if (table_type == DB_TYPE_UNKNOWN)
{
my_error(ER_NO_SUCH_TABLE, MYF(0),
table_list->db, table_list->table_name);
DBUG_RETURN(TRUE);
}
if (!ha_check_storage_engine_flag(table_type, HTON_CAN_RECREATE)
|| thd->lex->sphead)
goto trunc_by_del;
if (lock_and_wait_for_table_name(thd, table_list))
DBUG_RETURN(TRUE);
}
*fn_ext(path)=0; // Remove the .frm extension
error= ha_create_table(thd, path, table_list->db, table_list->table_name,
&create_info, 1);
query_cache_invalidate3(thd, table_list, 0);
end:
if (!dont_send_ok)
{
if (!error)
{
if (mysql_bin_log.is_open())
{
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
0, FALSE);
mysql_bin_log.write(&qinfo);
}
send_ok(thd); // This should return record count
}
VOID(pthread_mutex_lock(&LOCK_open));
unlock_table_name(thd, table_list);
VOID(pthread_mutex_unlock(&LOCK_open));
}
else if (error)
{
VOID(pthread_mutex_lock(&LOCK_open));
unlock_table_name(thd, table_list);
VOID(pthread_mutex_unlock(&LOCK_open));
}
DBUG_RETURN(error);
trunc_by_del:
/* Probably InnoDB table */
ulong save_options= thd->options;
table_list->lock_type= TL_WRITE;
thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT);
ha_enable_transaction(thd, FALSE);
mysql_init_select(thd->lex);
error= mysql_delete(thd, table_list, (COND*) 0, (SQL_LIST*) 0,
HA_POS_ERROR, LL(0), TRUE);
ha_enable_transaction(thd, TRUE);
thd->options= save_options;
DBUG_RETURN(error);
}