/* Copyright (C) 2000-2006 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */


/* Classes in mysql */

#ifdef USE_PRAGMA_INTERFACE
#pragma interface			/* gcc class implementation */
#endif

#include "log.h"
#include "rpl_tblmap.h"
|
2002-01-20 03:16:52 +01:00
|
|
|
|
2007-04-12 08:58:04 +02:00
|
|
|
/*
  Forward declaration; the full definition of the relay-log state lives
  in the replication headers. RELAY_LOG_INFO is the historical typedef
  name used throughout the replication code.
*/
struct st_relay_log_info;
typedef st_relay_log_info RELAY_LOG_INFO;
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/*
  Forward declarations for classes this header only refers to by
  pointer/reference (defined in log_event.h, sp_rcontext.h, sp_cache.h
  and the lexer headers).
*/
class Query_log_event;
class Load_log_event;
class Slave_log_event;
class sp_rcontext;
class sp_cache;
class Lex_input_stream;
class Rows_log_event;
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2001-05-11 22:26:12 +02:00
|
|
|
/* Action for ALTER TABLE ... ENABLE/DISABLE KEYS */
enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };

/* Read modes for the HANDLER ... READ statement */
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };

/* How duplicate-key conflicts are handled (INSERT/REPLACE/LOAD) */
enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };

/* States of the delay_key_write option */
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
                            DELAY_KEY_WRITE_ALL };
|
/*
  NOTE(review): a large block of version-control residue (the WL#3281
  handler-cleanup commit message) was accidentally interleaved into this
  header by a faulty export. It is not source code; consult the project
  changelog for the full WL#3281 text.
*/
|
|
|
/* How strictly field values are checked when stored */
enum enum_check_fields
{ CHECK_FIELD_IGNORE, CHECK_FIELD_WARN, CHECK_FIELD_ERROR_FOR_NULL };

/*
  Which column usage bitmaps (read_set/write_set) to mark during
  fix_fields; replaces the old integer thd->set_query_id.
*/
enum enum_mark_columns
{ MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE};
|
2003-10-11 22:26:39 +02:00
|
|
|
|
2003-04-02 15:16:19 +02:00
|
|
|
/*
  Shared string constants and message table, defined elsewhere in the
  server (definitions not visible in this header).
*/
extern char internal_table_name[2];
extern char empty_c_string[1];
extern const char **errmesg;            /* error message texts */
|
2003-04-02 15:16:19 +02:00
|
|
|
|
2005-01-16 13:16:23 +01:00
|
|
|
#define TC_LOG_PAGE_SIZE 8192
|
|
|
|
#define TC_LOG_MIN_SIZE (3*TC_LOG_PAGE_SIZE)
|
|
|
|
|
|
|
|
#define TC_HEURISTIC_RECOVER_COMMIT 1
|
|
|
|
#define TC_HEURISTIC_RECOVER_ROLLBACK 2
|
|
|
|
extern uint tc_heuristic_recover;
|
|
|
|
|
2003-01-30 18:39:54 +01:00
|
|
|
typedef struct st_user_var_events
|
|
|
|
{
|
|
|
|
user_var_entry *user_var_event;
|
|
|
|
char *value;
|
|
|
|
ulong length;
|
|
|
|
Item_result type;
|
|
|
|
uint charset_number;
|
|
|
|
} BINLOG_USER_VAR_EVENT;
|
|
|
|
|
2005-02-22 00:15:31 +01:00
|
|
|
/*
  RP_* flag bits — presumably for log rotate/purge calls; the callers
  are not visible in this header (TODO confirm against log.cc).
*/
#define RP_LOCK_LOG_IS_ALREADY_LOCKED 1
#define RP_FORCE_ROTATE               2
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
typedef struct st_copy_info {
|
|
|
|
ha_rows records;
|
|
|
|
ha_rows deleted;
|
2004-03-15 18:36:16 +01:00
|
|
|
ha_rows updated;
|
2000-07-31 21:29:14 +02:00
|
|
|
ha_rows copied;
|
2002-06-11 10:20:31 +02:00
|
|
|
ha_rows error_count;
|
2000-07-31 21:29:14 +02:00
|
|
|
enum enum_duplicates handle_duplicates;
|
2002-06-11 10:20:31 +02:00
|
|
|
int escape_char, last_errno;
|
2004-12-31 11:04:35 +01:00
|
|
|
bool ignore;
|
|
|
|
/* for INSERT ... UPDATE */
|
2002-12-02 20:38:00 +01:00
|
|
|
List<Item> *update_fields;
|
|
|
|
List<Item> *update_values;
|
2005-01-04 12:46:53 +01:00
|
|
|
/* for VIEW ... WITH CHECK OPTION */
|
2004-09-29 15:35:01 +02:00
|
|
|
TABLE_LIST *view;
|
2000-07-31 21:29:14 +02:00
|
|
|
} COPY_INFO;
|
|
|
|
|
|
|
|
|
|
|
|
class key_part_spec :public Sql_alloc {
|
|
|
|
public:
|
|
|
|
const char *field_name;
|
|
|
|
uint length;
|
|
|
|
key_part_spec(const char *name,uint len=0) :field_name(name), length(len) {}
|
2004-04-21 12:15:43 +02:00
|
|
|
bool operator==(const key_part_spec& other) const;
|
5.1 version of a fix and test cases for bugs:
Bug#4968 ""Stored procedure crash if cursor opened on altered table"
Bug#6895 "Prepared Statements: ALTER TABLE DROP COLUMN does nothing"
Bug#19182 "CREATE TABLE bar (m INT) SELECT n FROM foo; doesn't work from
stored procedure."
Bug#19733 "Repeated alter, or repeated create/drop, fails"
Bug#22060 "ALTER TABLE x AUTO_INCREMENT=y in SP crashes server"
Bug#24879 "Prepared Statements: CREATE TABLE (UTF8 KEY) produces a
growing key length" (this bug is not fixed in 5.0)
Re-execution of CREATE DATABASE, CREATE TABLE and ALTER TABLE
statements in stored routines or as prepared statements caused
incorrect results (and crashes in versions prior to 5.0.25).
In 5.1 the problem occured only for CREATE DATABASE, CREATE TABLE
SELECT and CREATE TABLE with INDEX/DATA DIRECTOY options).
The problem of bugs 4968, 19733, 19282 and 6895 was that functions
mysql_prepare_table, mysql_create_table and mysql_alter_table are not
re-execution friendly: during their operation they modify contents
of LEX (members create_info, alter_info, key_list, create_list),
thus making the LEX unusable for the next execution.
In particular, these functions removed processed columns and keys from
create_list, key_list and drop_list. Search the code in sql_table.cc
for drop_it.remove() and similar patterns to find evidence.
The fix is to supply to these functions a usable copy of each of the
above structures at every re-execution of an SQL statement.
To simplify memory management, LEX::key_list and LEX::create_list
were added to LEX::alter_info, a fresh copy of which is created for
every execution.
The problem of crashing bug 22060 stemmed from the fact that the above
metnioned functions were not only modifying HA_CREATE_INFO structure
in LEX, but also were changing it to point to areas in volatile memory
of the execution memory root.
The patch solves this problem by creating and using an on-stack
copy of HA_CREATE_INFO in mysql_execute_command.
Additionally, this patch splits the part of mysql_alter_table
that analizes and rewrites information from the parser into
a separate function - mysql_prepare_alter_table, in analogy with
mysql_prepare_table, which is renamed to mysql_prepare_create_table.
2007-05-28 13:30:01 +02:00
|
|
|
/**
|
|
|
|
Construct a copy of this key_part_spec. field_name is copied
|
|
|
|
by-pointer as it is known to never change. At the same time
|
|
|
|
'length' may be reset in mysql_prepare_create_table, and this
|
|
|
|
is why we supply it with a copy.
|
|
|
|
|
|
|
|
@return If out of memory, 0 is returned and an error is set in
|
|
|
|
THD.
|
|
|
|
*/
|
|
|
|
key_part_spec *clone(MEM_ROOT *mem_root) const
|
|
|
|
{ return new (mem_root) key_part_spec(*this); }
|
2000-07-31 21:29:14 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
class Alter_drop :public Sql_alloc {
|
|
|
|
public:
|
|
|
|
enum drop_type {KEY, COLUMN };
|
|
|
|
const char *name;
|
|
|
|
enum drop_type type;
|
|
|
|
Alter_drop(enum drop_type par_type,const char *par_name)
|
|
|
|
:name(par_name), type(par_type) {}
|
5.1 version of a fix and test cases for bugs:
Bug#4968 ""Stored procedure crash if cursor opened on altered table"
Bug#6895 "Prepared Statements: ALTER TABLE DROP COLUMN does nothing"
Bug#19182 "CREATE TABLE bar (m INT) SELECT n FROM foo; doesn't work from
stored procedure."
Bug#19733 "Repeated alter, or repeated create/drop, fails"
Bug#22060 "ALTER TABLE x AUTO_INCREMENT=y in SP crashes server"
Bug#24879 "Prepared Statements: CREATE TABLE (UTF8 KEY) produces a
growing key length" (this bug is not fixed in 5.0)
Re-execution of CREATE DATABASE, CREATE TABLE and ALTER TABLE
statements in stored routines or as prepared statements caused
incorrect results (and crashes in versions prior to 5.0.25).
In 5.1 the problem occured only for CREATE DATABASE, CREATE TABLE
SELECT and CREATE TABLE with INDEX/DATA DIRECTOY options).
The problem of bugs 4968, 19733, 19282 and 6895 was that functions
mysql_prepare_table, mysql_create_table and mysql_alter_table are not
re-execution friendly: during their operation they modify contents
of LEX (members create_info, alter_info, key_list, create_list),
thus making the LEX unusable for the next execution.
In particular, these functions removed processed columns and keys from
create_list, key_list and drop_list. Search the code in sql_table.cc
for drop_it.remove() and similar patterns to find evidence.
The fix is to supply to these functions a usable copy of each of the
above structures at every re-execution of an SQL statement.
To simplify memory management, LEX::key_list and LEX::create_list
were added to LEX::alter_info, a fresh copy of which is created for
every execution.
The problem of crashing bug 22060 stemmed from the fact that the above
metnioned functions were not only modifying HA_CREATE_INFO structure
in LEX, but also were changing it to point to areas in volatile memory
of the execution memory root.
The patch solves this problem by creating and using an on-stack
copy of HA_CREATE_INFO in mysql_execute_command.
Additionally, this patch splits the part of mysql_alter_table
that analizes and rewrites information from the parser into
a separate function - mysql_prepare_alter_table, in analogy with
mysql_prepare_table, which is renamed to mysql_prepare_create_table.
2007-05-28 13:30:01 +02:00
|
|
|
/**
|
|
|
|
Used to make a clone of this object for ALTER/CREATE TABLE
|
|
|
|
@sa comment for key_part_spec::clone
|
|
|
|
*/
|
|
|
|
Alter_drop *clone(MEM_ROOT *mem_root) const
|
|
|
|
{ return new (mem_root) Alter_drop(*this); }
|
2000-07-31 21:29:14 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
class Alter_column :public Sql_alloc {
|
|
|
|
public:
|
|
|
|
const char *name;
|
|
|
|
Item *def;
|
|
|
|
Alter_column(const char *par_name,Item *literal)
|
|
|
|
:name(par_name), def(literal) {}
|
5.1 version of a fix and test cases for bugs:
Bug#4968 ""Stored procedure crash if cursor opened on altered table"
Bug#6895 "Prepared Statements: ALTER TABLE DROP COLUMN does nothing"
Bug#19182 "CREATE TABLE bar (m INT) SELECT n FROM foo; doesn't work from
stored procedure."
Bug#19733 "Repeated alter, or repeated create/drop, fails"
Bug#22060 "ALTER TABLE x AUTO_INCREMENT=y in SP crashes server"
Bug#24879 "Prepared Statements: CREATE TABLE (UTF8 KEY) produces a
growing key length" (this bug is not fixed in 5.0)
Re-execution of CREATE DATABASE, CREATE TABLE and ALTER TABLE
statements in stored routines or as prepared statements caused
incorrect results (and crashes in versions prior to 5.0.25).
In 5.1 the problem occured only for CREATE DATABASE, CREATE TABLE
SELECT and CREATE TABLE with INDEX/DATA DIRECTOY options).
The problem of bugs 4968, 19733, 19282 and 6895 was that functions
mysql_prepare_table, mysql_create_table and mysql_alter_table are not
re-execution friendly: during their operation they modify contents
of LEX (members create_info, alter_info, key_list, create_list),
thus making the LEX unusable for the next execution.
In particular, these functions removed processed columns and keys from
create_list, key_list and drop_list. Search the code in sql_table.cc
for drop_it.remove() and similar patterns to find evidence.
The fix is to supply to these functions a usable copy of each of the
above structures at every re-execution of an SQL statement.
To simplify memory management, LEX::key_list and LEX::create_list
were added to LEX::alter_info, a fresh copy of which is created for
every execution.
The problem of crashing bug 22060 stemmed from the fact that the above
metnioned functions were not only modifying HA_CREATE_INFO structure
in LEX, but also were changing it to point to areas in volatile memory
of the execution memory root.
The patch solves this problem by creating and using an on-stack
copy of HA_CREATE_INFO in mysql_execute_command.
Additionally, this patch splits the part of mysql_alter_table
that analizes and rewrites information from the parser into
a separate function - mysql_prepare_alter_table, in analogy with
mysql_prepare_table, which is renamed to mysql_prepare_create_table.
2007-05-28 13:30:01 +02:00
|
|
|
/**
|
|
|
|
Used to make a clone of this object for ALTER/CREATE TABLE
|
|
|
|
@sa comment for key_part_spec::clone
|
|
|
|
*/
|
|
|
|
Alter_column *clone(MEM_ROOT *mem_root) const
|
|
|
|
{ return new (mem_root) Alter_column(*this); }
|
2000-07-31 21:29:14 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
class Key :public Sql_alloc {
|
|
|
|
public:
|
2002-06-02 20:22:20 +02:00
|
|
|
enum Keytype { PRIMARY, UNIQUE, MULTIPLE, FULLTEXT, SPATIAL, FOREIGN_KEY};
|
2000-07-31 21:29:14 +02:00
|
|
|
enum Keytype type;
|
2006-05-03 18:40:52 +02:00
|
|
|
KEY_CREATE_INFO key_create_info;
|
2000-07-31 21:29:14 +02:00
|
|
|
List<key_part_spec> columns;
|
2002-06-02 20:22:20 +02:00
|
|
|
const char *name;
|
2004-05-11 23:29:52 +02:00
|
|
|
bool generated;
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2006-05-03 14:59:17 +02:00
|
|
|
Key(enum Keytype type_par, const char *name_arg,
|
|
|
|
KEY_CREATE_INFO *key_info_arg,
|
|
|
|
bool generated_arg, List<key_part_spec> &cols)
|
2006-05-03 18:40:52 +02:00
|
|
|
:type(type_par), key_create_info(*key_info_arg), columns(cols),
|
|
|
|
name(name_arg), generated(generated_arg)
|
2002-04-12 20:35:46 +02:00
|
|
|
{}
|
5.1 version of a fix and test cases for bugs:
Bug#4968 ""Stored procedure crash if cursor opened on altered table"
Bug#6895 "Prepared Statements: ALTER TABLE DROP COLUMN does nothing"
Bug#19182 "CREATE TABLE bar (m INT) SELECT n FROM foo; doesn't work from
stored procedure."
Bug#19733 "Repeated alter, or repeated create/drop, fails"
Bug#22060 "ALTER TABLE x AUTO_INCREMENT=y in SP crashes server"
Bug#24879 "Prepared Statements: CREATE TABLE (UTF8 KEY) produces a
growing key length" (this bug is not fixed in 5.0)
Re-execution of CREATE DATABASE, CREATE TABLE and ALTER TABLE
statements in stored routines or as prepared statements caused
incorrect results (and crashes in versions prior to 5.0.25).
In 5.1 the problem occured only for CREATE DATABASE, CREATE TABLE
SELECT and CREATE TABLE with INDEX/DATA DIRECTOY options).
The problem of bugs 4968, 19733, 19282 and 6895 was that functions
mysql_prepare_table, mysql_create_table and mysql_alter_table are not
re-execution friendly: during their operation they modify contents
of LEX (members create_info, alter_info, key_list, create_list),
thus making the LEX unusable for the next execution.
In particular, these functions removed processed columns and keys from
create_list, key_list and drop_list. Search the code in sql_table.cc
for drop_it.remove() and similar patterns to find evidence.
The fix is to supply to these functions a usable copy of each of the
above structures at every re-execution of an SQL statement.
To simplify memory management, LEX::key_list and LEX::create_list
were added to LEX::alter_info, a fresh copy of which is created for
every execution.
The problem of crashing bug 22060 stemmed from the fact that the above
metnioned functions were not only modifying HA_CREATE_INFO structure
in LEX, but also were changing it to point to areas in volatile memory
of the execution memory root.
The patch solves this problem by creating and using an on-stack
copy of HA_CREATE_INFO in mysql_execute_command.
Additionally, this patch splits the part of mysql_alter_table
that analizes and rewrites information from the parser into
a separate function - mysql_prepare_alter_table, in analogy with
mysql_prepare_table, which is renamed to mysql_prepare_create_table.
2007-05-28 13:30:01 +02:00
|
|
|
Key(const Key &rhs, MEM_ROOT *mem_root);
|
|
|
|
virtual ~Key() {}
|
2004-04-21 12:15:43 +02:00
|
|
|
/* Equality comparison of keys (ignoring name) */
|
2004-05-11 23:29:52 +02:00
|
|
|
friend bool foreign_key_prefix(Key *a, Key *b);
|
5.1 version of a fix and test cases for bugs:
Bug#4968 ""Stored procedure crash if cursor opened on altered table"
Bug#6895 "Prepared Statements: ALTER TABLE DROP COLUMN does nothing"
Bug#19182 "CREATE TABLE bar (m INT) SELECT n FROM foo; doesn't work from
stored procedure."
Bug#19733 "Repeated alter, or repeated create/drop, fails"
Bug#22060 "ALTER TABLE x AUTO_INCREMENT=y in SP crashes server"
Bug#24879 "Prepared Statements: CREATE TABLE (UTF8 KEY) produces a
growing key length" (this bug is not fixed in 5.0)
Re-execution of CREATE DATABASE, CREATE TABLE and ALTER TABLE
statements in stored routines or as prepared statements caused
incorrect results (and crashes in versions prior to 5.0.25).
In 5.1 the problem occured only for CREATE DATABASE, CREATE TABLE
SELECT and CREATE TABLE with INDEX/DATA DIRECTOY options).
The problem of bugs 4968, 19733, 19282 and 6895 was that functions
mysql_prepare_table, mysql_create_table and mysql_alter_table are not
re-execution friendly: during their operation they modify contents
of LEX (members create_info, alter_info, key_list, create_list),
thus making the LEX unusable for the next execution.
In particular, these functions removed processed columns and keys from
create_list, key_list and drop_list. Search the code in sql_table.cc
for drop_it.remove() and similar patterns to find evidence.
The fix is to supply to these functions a usable copy of each of the
above structures at every re-execution of an SQL statement.
To simplify memory management, LEX::key_list and LEX::create_list
were added to LEX::alter_info, a fresh copy of which is created for
every execution.
The problem of crashing bug 22060 stemmed from the fact that the above
metnioned functions were not only modifying HA_CREATE_INFO structure
in LEX, but also were changing it to point to areas in volatile memory
of the execution memory root.
The patch solves this problem by creating and using an on-stack
copy of HA_CREATE_INFO in mysql_execute_command.
Additionally, this patch splits the part of mysql_alter_table
that analizes and rewrites information from the parser into
a separate function - mysql_prepare_alter_table, in analogy with
mysql_prepare_table, which is renamed to mysql_prepare_create_table.
2007-05-28 13:30:01 +02:00
|
|
|
/**
|
|
|
|
Used to make a clone of this object for ALTER/CREATE TABLE
|
|
|
|
@sa comment for key_part_spec::clone
|
|
|
|
*/
|
|
|
|
virtual Key *clone(MEM_ROOT *mem_root) const
|
|
|
|
{ return new (mem_root) Key(*this, mem_root); }
|
2000-07-31 21:29:14 +02:00
|
|
|
};
|
|
|
|
|
2002-06-02 20:22:20 +02:00
|
|
|
class Table_ident;
|
|
|
|
|
|
|
|
class foreign_key: public Key {
|
|
|
|
public:
|
|
|
|
enum fk_match_opt { FK_MATCH_UNDEF, FK_MATCH_FULL,
|
|
|
|
FK_MATCH_PARTIAL, FK_MATCH_SIMPLE};
|
|
|
|
enum fk_option { FK_OPTION_UNDEF, FK_OPTION_RESTRICT, FK_OPTION_CASCADE,
|
|
|
|
FK_OPTION_SET_NULL, FK_OPTION_NO_ACTION, FK_OPTION_DEFAULT};
|
|
|
|
|
|
|
|
Table_ident *ref_table;
|
|
|
|
List<key_part_spec> ref_columns;
|
|
|
|
uint delete_opt, update_opt, match_opt;
|
|
|
|
foreign_key(const char *name_arg, List<key_part_spec> &cols,
|
|
|
|
Table_ident *table, List<key_part_spec> &ref_cols,
|
|
|
|
uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg)
|
2006-05-03 14:59:17 +02:00
|
|
|
:Key(FOREIGN_KEY, name_arg, &default_key_create_info, 0, cols),
|
2002-06-02 20:22:20 +02:00
|
|
|
ref_table(table), ref_columns(cols),
|
|
|
|
delete_opt(delete_opt_arg), update_opt(update_opt_arg),
|
|
|
|
match_opt(match_opt_arg)
|
|
|
|
{}
|
5.1 version of a fix and test cases for bugs:
Bug#4968 ""Stored procedure crash if cursor opened on altered table"
Bug#6895 "Prepared Statements: ALTER TABLE DROP COLUMN does nothing"
Bug#19182 "CREATE TABLE bar (m INT) SELECT n FROM foo; doesn't work from
stored procedure."
Bug#19733 "Repeated alter, or repeated create/drop, fails"
Bug#22060 "ALTER TABLE x AUTO_INCREMENT=y in SP crashes server"
Bug#24879 "Prepared Statements: CREATE TABLE (UTF8 KEY) produces a
growing key length" (this bug is not fixed in 5.0)
Re-execution of CREATE DATABASE, CREATE TABLE and ALTER TABLE
statements in stored routines or as prepared statements caused
incorrect results (and crashes in versions prior to 5.0.25).
In 5.1 the problem occured only for CREATE DATABASE, CREATE TABLE
SELECT and CREATE TABLE with INDEX/DATA DIRECTOY options).
The problem of bugs 4968, 19733, 19282 and 6895 was that functions
mysql_prepare_table, mysql_create_table and mysql_alter_table are not
re-execution friendly: during their operation they modify contents
of LEX (members create_info, alter_info, key_list, create_list),
thus making the LEX unusable for the next execution.
In particular, these functions removed processed columns and keys from
create_list, key_list and drop_list. Search the code in sql_table.cc
for drop_it.remove() and similar patterns to find evidence.
The fix is to supply to these functions a usable copy of each of the
above structures at every re-execution of an SQL statement.
To simplify memory management, LEX::key_list and LEX::create_list
were added to LEX::alter_info, a fresh copy of which is created for
every execution.
The problem of crashing bug 22060 stemmed from the fact that the above
metnioned functions were not only modifying HA_CREATE_INFO structure
in LEX, but also were changing it to point to areas in volatile memory
of the execution memory root.
The patch solves this problem by creating and using an on-stack
copy of HA_CREATE_INFO in mysql_execute_command.
Additionally, this patch splits the part of mysql_alter_table
that analizes and rewrites information from the parser into
a separate function - mysql_prepare_alter_table, in analogy with
mysql_prepare_table, which is renamed to mysql_prepare_create_table.
2007-05-28 13:30:01 +02:00
|
|
|
foreign_key(const foreign_key &rhs, MEM_ROOT *mem_root);
|
|
|
|
/**
|
|
|
|
Used to make a clone of this object for ALTER/CREATE TABLE
|
|
|
|
@sa comment for key_part_spec::clone
|
|
|
|
*/
|
|
|
|
virtual Key *clone(MEM_ROOT *mem_root) const
|
|
|
|
{ return new (mem_root) foreign_key(*this, mem_root); }
|
2002-06-02 20:22:20 +02:00
|
|
|
};
|
2000-07-31 21:29:14 +02:00
|
|
|
|
|
|
|
typedef struct st_mysql_lock
|
|
|
|
{
|
|
|
|
TABLE **table;
|
|
|
|
uint table_count,lock_count;
|
|
|
|
THR_LOCK_DATA **locks;
|
|
|
|
} MYSQL_LOCK;
|
|
|
|
|
|
|
|
|
|
|
|
class LEX_COLUMN : public Sql_alloc
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
String column;
|
|
|
|
uint rights;
|
|
|
|
LEX_COLUMN (const String& x,const uint& y ): column (x),rights (y) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
#include "sql_lex.h" /* Must be here */
|
|
|
|
|
2007-05-10 16:27:36 +02:00
|
|
|
class Delayed_insert;
|
2002-09-26 22:08:22 +02:00
|
|
|
class select_result;
|
2006-01-19 22:40:56 +01:00
|
|
|
class Time_zone;
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2002-03-30 20:36:05 +01:00
|
|
|
#define THD_SENTRY_MAGIC 0xfeedd1ff
|
|
|
|
#define THD_SENTRY_GONE 0xdeadbeef
|
|
|
|
|
|
|
|
#define THD_CHECK_SENTRY(thd) DBUG_ASSERT(thd->dbug_sentry == THD_SENTRY_MAGIC)
|
|
|
|
|
2002-06-28 18:30:09 +02:00
|
|
|
struct system_variables
|
|
|
|
{
|
2007-03-02 17:43:45 +01:00
|
|
|
/*
|
|
|
|
How dynamically allocated system variables are handled:
|
|
|
|
|
|
|
|
The global_system_variables and max_system_variables are "authoritative"
|
|
|
|
They both should have the same 'version' and 'size'.
|
|
|
|
When attempting to access a dynamic variable, if the session version
|
|
|
|
is out of date, then the session version is updated and realloced if
|
|
|
|
neccessary and bytes copied from global to make up for missing data.
|
|
|
|
*/
|
|
|
|
ulong dynamic_variables_version;
|
|
|
|
char* dynamic_variables_ptr;
|
|
|
|
uint dynamic_variables_head; /* largest valid variable offset */
|
|
|
|
uint dynamic_variables_size; /* how many bytes are in use */
|
|
|
|
|
2002-07-23 17:31:22 +02:00
|
|
|
ulonglong myisam_max_extra_sort_file_size;
|
|
|
|
ulonglong myisam_max_sort_file_size;
|
2006-11-27 23:47:21 +01:00
|
|
|
ulonglong max_heap_table_size;
|
|
|
|
ulonglong tmp_table_size;
|
2002-12-20 13:58:27 +01:00
|
|
|
ha_rows select_limit;
|
|
|
|
ha_rows max_join_size;
|
2004-09-15 21:10:31 +02:00
|
|
|
ulong auto_increment_increment, auto_increment_offset;
|
2002-07-23 17:31:22 +02:00
|
|
|
ulong bulk_insert_buff_size;
|
2002-06-28 18:30:09 +02:00
|
|
|
ulong join_buff_size;
|
|
|
|
ulong long_query_time;
|
2002-07-23 17:31:22 +02:00
|
|
|
ulong max_allowed_packet;
|
2002-10-02 12:33:08 +02:00
|
|
|
ulong max_error_count;
|
2003-04-24 13:33:33 +02:00
|
|
|
ulong max_length_for_sort_data;
|
2002-10-02 12:33:08 +02:00
|
|
|
ulong max_sort_length;
|
2002-06-28 18:30:09 +02:00
|
|
|
ulong max_tmp_tables;
|
2004-03-04 18:58:36 +01:00
|
|
|
ulong max_insert_delayed_threads;
|
2004-12-23 21:45:10 +01:00
|
|
|
ulong multi_range_count;
|
2003-05-04 18:43:37 +02:00
|
|
|
ulong myisam_repair_threads;
|
2002-07-23 17:31:22 +02:00
|
|
|
ulong myisam_sort_buff_size;
|
2005-09-21 00:18:29 +02:00
|
|
|
ulong myisam_stats_method;
|
2002-07-23 17:31:22 +02:00
|
|
|
ulong net_buffer_length;
|
2002-06-28 18:30:09 +02:00
|
|
|
ulong net_interactive_timeout;
|
2002-07-23 17:31:22 +02:00
|
|
|
ulong net_read_timeout;
|
2002-10-02 12:33:08 +02:00
|
|
|
ulong net_retry_count;
|
2002-06-28 18:30:09 +02:00
|
|
|
ulong net_wait_timeout;
|
2002-07-23 17:31:22 +02:00
|
|
|
ulong net_write_timeout;
|
2004-05-20 16:47:43 +02:00
|
|
|
ulong optimizer_prune_level;
|
|
|
|
ulong optimizer_search_depth;
|
2003-06-12 13:29:02 +02:00
|
|
|
ulong preload_buff_size;
|
2002-07-23 17:31:22 +02:00
|
|
|
ulong query_cache_type;
|
|
|
|
ulong read_buff_size;
|
|
|
|
ulong read_rnd_buff_size;
|
2005-05-05 17:06:49 +02:00
|
|
|
ulong div_precincrement;
|
2002-06-28 18:30:09 +02:00
|
|
|
ulong sortbuff_size;
|
2007-02-23 12:13:55 +01:00
|
|
|
ulong thread_handling;
|
2002-07-23 17:31:22 +02:00
|
|
|
ulong tx_isolation;
|
2005-02-11 22:33:52 +01:00
|
|
|
ulong completion_type;
|
2003-10-13 14:50:30 +02:00
|
|
|
/* Determines which non-standard SQL behaviour should be enabled */
|
2003-01-16 01:04:50 +01:00
|
|
|
ulong sql_mode;
|
2005-11-23 00:11:19 +01:00
|
|
|
ulong max_sp_recursion_depth;
|
2004-07-16 00:15:55 +02:00
|
|
|
/* check of key presence in updatable view */
|
2004-10-07 11:13:42 +02:00
|
|
|
ulong updatable_views_with_limit;
|
2003-04-02 15:16:19 +02:00
|
|
|
ulong default_week_format;
|
2003-06-27 02:04:54 +02:00
|
|
|
ulong max_seeks_for_key;
|
2003-10-11 21:00:24 +02:00
|
|
|
ulong range_alloc_block_size;
|
|
|
|
ulong query_alloc_block_size;
|
|
|
|
ulong query_prealloc_size;
|
|
|
|
ulong trans_alloc_block_size;
|
|
|
|
ulong trans_prealloc_size;
|
2004-06-01 16:29:24 +02:00
|
|
|
ulong log_warnings;
|
2003-03-18 00:07:40 +01:00
|
|
|
ulong group_concat_max_len;
|
2007-02-23 12:13:55 +01:00
|
|
|
ulong ndb_autoincrement_prefetch_sz;
|
|
|
|
ulong ndb_index_stat_cache_entries;
|
|
|
|
ulong ndb_index_stat_update_freq;
|
|
|
|
ulong binlog_format; // binlog format for this thd (see enum_binlog_format)
|
2002-12-29 22:46:48 +01:00
|
|
|
/*
|
|
|
|
In slave thread we need to know in behalf of which
|
|
|
|
thread the query is being run to replicate temp tables properly
|
|
|
|
*/
|
2007-02-23 12:13:55 +01:00
|
|
|
my_thread_id pseudo_thread_id;
|
2002-12-29 22:46:48 +01:00
|
|
|
|
2003-04-02 15:16:19 +02:00
|
|
|
my_bool low_priority_updates;
|
|
|
|
my_bool new_mode;
|
2007-03-05 18:08:41 +01:00
|
|
|
/*
|
|
|
|
compatibility option:
|
|
|
|
- index usage hints (USE INDEX without a FOR clause) behave as in 5.0
|
|
|
|
*/
|
|
|
|
my_bool old_mode;
|
2004-03-04 17:32:55 +01:00
|
|
|
my_bool query_cache_wlock_invalidate;
|
2005-02-11 22:05:24 +01:00
|
|
|
my_bool engine_condition_pushdown;
|
2004-11-17 09:15:53 +01:00
|
|
|
my_bool ndb_force_send;
|
2006-06-13 16:44:30 +02:00
|
|
|
my_bool ndb_use_copying_alter_table;
|
2004-11-17 09:15:53 +01:00
|
|
|
my_bool ndb_use_exact_count;
|
|
|
|
my_bool ndb_use_transactions;
|
2005-09-15 02:33:28 +02:00
|
|
|
my_bool ndb_index_stat_enable;
|
WL#2977 and WL#2712 global and session-level variable to set the binlog format (row/statement),
and new binlog format called "mixed" (which is statement-based except if only row-based is correct,
in this cset it means if UDF or UUID is used; more cases could be added in later 5.1 release):
SET GLOBAL|SESSION BINLOG_FORMAT=row|statement|mixed|default;
the global default is statement unless cluster is enabled (then it's row) as in 5.1-alpha.
It's not possible to use SET on this variable if a session is currently in row-based mode and has open temporary tables (because CREATE
TEMPORARY TABLE was not binlogged so temp table is not known on slave), or if NDB is enabled (because
NDB does not support such change on-the-fly, though it will later), of if in a stored function (see below).
The added tests test the possibility or impossibility to SET, their effects, and the mixed mode,
including in prepared statements and in stored procedures and functions.
Caveats:
a) The mixed mode will not work for stored functions: in mixed mode, a stored function will
always be binlogged as one call and in a statement-based way (e.g. INSERT VALUES(myfunc()) or SELECT myfunc()).
b) for the same reason, changing the thread's binlog format inside a stored function is
refused with an error message.
c) the same problems apply to triggers; implementing b) for triggers will be done later (will ask
Dmitri).
Additionally, as the binlog format is now changeable by each user for his session, I remove the implication
which was done at startup, where row-based automatically set log-bin-trust-routine-creators to 1
(not possible anymore as a user can now switch to stmt-based and do nasty things again), and automatically
set --innodb-locks-unsafe-for-binlog to 1 (was anyway theoretically incorrect as it disabled
phantom protection).
Plus fixes for compiler warnings.
2006-02-25 22:21:03 +01:00
|
|
|
|
2005-07-22 22:43:59 +02:00
|
|
|
my_bool old_alter_table;
|
2003-07-08 00:36:14 +02:00
|
|
|
my_bool old_passwords;
|
2005-08-12 21:15:01 +02:00
|
|
|
|
2007-03-02 17:43:45 +01:00
|
|
|
plugin_ref table_plugin;
|
2007-02-23 12:13:55 +01:00
|
|
|
|
2003-09-15 13:31:04 +02:00
|
|
|
/* Only charset part of these variables is sensible */
|
2006-01-18 09:55:38 +01:00
|
|
|
CHARSET_INFO *character_set_filesystem;
|
2005-08-12 21:15:01 +02:00
|
|
|
CHARSET_INFO *character_set_client;
|
2003-05-21 14:44:12 +02:00
|
|
|
CHARSET_INFO *character_set_results;
|
2005-08-12 21:15:01 +02:00
|
|
|
|
2003-09-15 13:31:04 +02:00
|
|
|
/* Both charset and collation parts of these variables are important */
|
|
|
|
CHARSET_INFO *collation_server;
|
|
|
|
CHARSET_INFO *collation_database;
|
2003-04-23 15:19:22 +02:00
|
|
|
CHARSET_INFO *collation_connection;
|
2003-11-03 13:01:59 +01:00
|
|
|
|
2006-07-04 14:40:40 +02:00
|
|
|
/* Locale Support */
|
|
|
|
MY_LOCALE *lc_time_names;
|
|
|
|
|
2004-06-18 08:11:31 +02:00
|
|
|
Time_zone *time_zone;
|
|
|
|
|
2007-03-23 21:08:31 +01:00
|
|
|
/* DATE, DATETIME and MYSQL_TIME formats */
|
2003-11-03 13:01:59 +01:00
|
|
|
DATE_TIME_FORMAT *date_format;
|
|
|
|
DATE_TIME_FORMAT *datetime_format;
|
|
|
|
DATE_TIME_FORMAT *time_format;
|
2006-03-10 15:47:56 +01:00
|
|
|
my_bool sysdate_is_now;
|
2007-03-05 18:08:41 +01:00
|
|
|
|
2002-07-23 17:31:22 +02:00
|
|
|
};
|
|
|
|
|
2004-09-13 15:48:01 +02:00
|
|
|
|
|
|
|
/* per thread status variables */
|
|
|
|
|
|
|
|
typedef struct system_status_var
|
|
|
|
{
|
|
|
|
ulong bytes_received;
|
|
|
|
ulong bytes_sent;
|
|
|
|
ulong com_other;
|
|
|
|
ulong com_stat[(uint) SQLCOM_END];
|
|
|
|
ulong created_tmp_disk_tables;
|
|
|
|
ulong created_tmp_tables;
|
|
|
|
ulong ha_commit_count;
|
|
|
|
ulong ha_delete_count;
|
|
|
|
ulong ha_read_first_count;
|
|
|
|
ulong ha_read_last_count;
|
|
|
|
ulong ha_read_key_count;
|
|
|
|
ulong ha_read_next_count;
|
|
|
|
ulong ha_read_prev_count;
|
|
|
|
ulong ha_read_rnd_count;
|
|
|
|
ulong ha_read_rnd_next_count;
|
|
|
|
ulong ha_rollback_count;
|
|
|
|
ulong ha_update_count;
|
|
|
|
ulong ha_write_count;
|
2005-01-16 13:16:23 +01:00
|
|
|
ulong ha_prepare_count;
|
|
|
|
ulong ha_discover_count;
|
|
|
|
ulong ha_savepoint_count;
|
|
|
|
ulong ha_savepoint_rollback_count;
|
2004-09-13 15:48:01 +02:00
|
|
|
|
|
|
|
/* KEY_CACHE parts. These are copies of the original */
|
|
|
|
ulong key_blocks_changed;
|
|
|
|
ulong key_blocks_used;
|
|
|
|
ulong key_cache_r_requests;
|
|
|
|
ulong key_cache_read;
|
|
|
|
ulong key_cache_w_requests;
|
|
|
|
ulong key_cache_write;
|
|
|
|
/* END OF KEY_CACHE parts */
|
|
|
|
|
|
|
|
ulong net_big_packet_count;
|
|
|
|
ulong opened_tables;
|
2005-11-23 21:45:02 +01:00
|
|
|
ulong opened_shares;
|
2004-09-13 15:48:01 +02:00
|
|
|
ulong select_full_join_count;
|
|
|
|
ulong select_full_range_join_count;
|
|
|
|
ulong select_range_count;
|
|
|
|
ulong select_range_check_count;
|
|
|
|
ulong select_scan_count;
|
|
|
|
ulong long_query_count;
|
|
|
|
ulong filesort_merge_passes;
|
|
|
|
ulong filesort_range_count;
|
|
|
|
ulong filesort_rows;
|
|
|
|
ulong filesort_scan_count;
|
2005-08-12 21:15:01 +02:00
|
|
|
/* Prepared statements and binary protocol */
|
2005-06-16 22:34:35 +02:00
|
|
|
ulong com_stmt_prepare;
|
|
|
|
ulong com_stmt_execute;
|
|
|
|
ulong com_stmt_send_long_data;
|
|
|
|
ulong com_stmt_fetch;
|
|
|
|
ulong com_stmt_reset;
|
|
|
|
ulong com_stmt_close;
|
2005-05-18 05:39:10 +02:00
|
|
|
|
2006-10-30 13:35:57 +01:00
|
|
|
/*
|
|
|
|
Status variables which it does not make sense to add to
|
|
|
|
global status variable counter
|
|
|
|
*/
|
2005-05-18 05:39:10 +02:00
|
|
|
double last_query_cost;
|
2004-09-13 15:48:01 +02:00
|
|
|
} STATUS_VAR;
|
|
|
|
|
|
|
|
/*
|
2006-10-30 13:35:57 +01:00
|
|
|
This is used for 'SHOW STATUS'. It must be updated to the last ulong
|
|
|
|
variable in system_status_var which is makes sens to add to the global
|
|
|
|
counter
|
2004-09-13 15:48:01 +02:00
|
|
|
*/
|
|
|
|
|
2005-06-16 23:58:36 +02:00
|
|
|
#define last_system_status_var com_stmt_close
|
2004-09-13 15:48:01 +02:00
|
|
|
|
2006-01-19 22:40:56 +01:00
|
|
|
#ifdef MYSQL_SERVER
|
|
|
|
|
2003-01-25 01:25:52 +01:00
|
|
|
void free_tmp_table(THD *thd, TABLE *entry);
|
2003-11-27 18:51:53 +01:00
|
|
|
|
|
|
|
|
2005-07-04 02:42:33 +02:00
|
|
|
/* The following macro is to make init of Query_arena simpler */
|
|
|
|
#ifndef DBUG_OFF
|
|
|
|
#define INIT_ARENA_DBUG_INFO is_backup_arena= 0
|
2005-08-12 21:15:01 +02:00
|
|
|
#else
|
|
|
|
#define INIT_ARENA_DBUG_INFO
|
2005-07-04 02:42:33 +02:00
|
|
|
#endif
|
|
|
|
|
2005-06-15 19:58:35 +02:00
|
|
|
class Query_arena
|
2004-05-20 01:02:49 +02:00
|
|
|
{
|
|
|
|
public:
|
|
|
|
/*
|
|
|
|
List of items created in the parser for this query. Every item puts
|
|
|
|
itself to the list on creation (see Item::Item() for details))
|
|
|
|
*/
|
|
|
|
Item *free_list;
|
2004-11-08 00:13:54 +01:00
|
|
|
MEM_ROOT *mem_root; // Pointer to current memroot
|
2004-11-03 11:39:38 +01:00
|
|
|
#ifndef DBUG_OFF
|
2005-06-23 18:22:08 +02:00
|
|
|
bool is_backup_arena; /* True if this arena is used for backup. */
|
2004-11-03 11:39:38 +01:00
|
|
|
#endif
|
2005-06-15 19:58:35 +02:00
|
|
|
enum enum_state
|
2004-08-24 12:44:15 +02:00
|
|
|
{
|
2005-06-07 12:11:36 +02:00
|
|
|
INITIALIZED= 0, INITIALIZED_FOR_SP= 1, PREPARED= 2,
|
|
|
|
CONVENTIONAL_EXECUTION= 3, EXECUTED= 4, ERROR= -1
|
2004-08-24 12:44:15 +02:00
|
|
|
};
|
2005-06-15 19:58:35 +02:00
|
|
|
|
2004-08-31 12:07:02 +02:00
|
|
|
enum_state state;
|
2004-08-21 00:02:46 +02:00
|
|
|
|
|
|
|
/* We build without RTTI, so dynamic_cast can't be used. */
|
|
|
|
enum Type
|
|
|
|
{
|
|
|
|
STATEMENT, PREPARED_STATEMENT, STORED_PROCEDURE
|
|
|
|
};
|
|
|
|
|
2005-06-22 09:59:13 +02:00
|
|
|
Query_arena(MEM_ROOT *mem_root_arg, enum enum_state state_arg) :
|
|
|
|
free_list(0), mem_root(mem_root_arg), state(state_arg)
|
2005-06-23 18:22:08 +02:00
|
|
|
{ INIT_ARENA_DBUG_INFO; }
|
2004-09-23 11:48:17 +02:00
|
|
|
/*
|
2005-06-15 19:58:35 +02:00
|
|
|
This constructor is used only when Query_arena is created as
|
|
|
|
backup storage for another instance of Query_arena.
|
2004-09-23 11:48:17 +02:00
|
|
|
*/
|
2005-06-23 18:22:08 +02:00
|
|
|
Query_arena() { INIT_ARENA_DBUG_INFO; }
|
|
|
|
|
2004-08-21 00:02:46 +02:00
|
|
|
virtual Type type() const;
|
2005-06-15 19:58:35 +02:00
|
|
|
virtual ~Query_arena() {};
|
2004-05-20 01:02:49 +02:00
|
|
|
|
2005-06-07 12:11:36 +02:00
|
|
|
inline bool is_stmt_prepare() const { return state == INITIALIZED; }
|
2005-06-21 19:15:21 +02:00
|
|
|
inline bool is_first_sp_execute() const
|
|
|
|
{ return state == INITIALIZED_FOR_SP; }
|
2005-06-02 22:02:47 +02:00
|
|
|
inline bool is_stmt_prepare_or_first_sp_execute() const
|
|
|
|
{ return (int)state < (int)PREPARED; }
|
2004-08-31 12:07:02 +02:00
|
|
|
inline bool is_first_stmt_execute() const { return state == PREPARED; }
|
2004-10-22 12:47:35 +02:00
|
|
|
inline bool is_stmt_execute() const
|
|
|
|
{ return state == PREPARED || state == EXECUTED; }
|
2004-11-03 11:39:38 +01:00
|
|
|
inline bool is_conventional() const
|
2004-09-10 01:22:44 +02:00
|
|
|
{ return state == CONVENTIONAL_EXECUTION; }
|
2005-06-22 09:59:13 +02:00
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly as this conflict was often hidden by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
inline void* alloc(size_t size) { return alloc_root(mem_root,size); }
|
|
|
|
inline void* calloc(size_t size)
|
2004-05-20 01:02:49 +02:00
|
|
|
{
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
void *ptr;
|
2004-11-08 00:13:54 +01:00
|
|
|
if ((ptr=alloc_root(mem_root,size)))
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
bzero(ptr, size);
|
2004-05-20 01:02:49 +02:00
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
inline char *strdup(const char *str)
|
2004-11-08 00:13:54 +01:00
|
|
|
{ return strdup_root(mem_root,str); }
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
inline char *strmake(const char *str, size_t size)
|
2004-11-08 00:13:54 +01:00
|
|
|
{ return strmake_root(mem_root,str,size); }
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
inline bool LEX_STRING_make(LEX_STRING *lex_str, const char *str,
                            size_t size)
{
  /*
    Fill 'lex_str' with an arena-allocated copy of the first 'size'
    bytes of 'str'. lex_str->length is set unconditionally; returns
    TRUE on out-of-memory (lex_str->str left as NULL).
  */
  lex_str->length= size;
  lex_str->str= strmake_root(mem_root, str, size);
  return lex_str->str == 0;
}
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
inline void *memdup(const void *str, size_t size)
|
2004-11-08 00:13:54 +01:00
|
|
|
{ return memdup_root(mem_root,str,size); }
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
inline void *memdup_w_gap(const void *str, size_t size, uint gap)
|
2004-05-20 01:02:49 +02:00
|
|
|
{
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
void *ptr;
|
|
|
|
if ((ptr= alloc_root(mem_root,size+gap)))
|
2004-05-20 01:02:49 +02:00
|
|
|
memcpy(ptr,str,size);
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
2005-09-02 15:21:19 +02:00
|
|
|
void set_query_arena(Query_arena *set);
|
2005-06-23 18:22:08 +02:00
|
|
|
|
|
|
|
void free_items();
|
2005-09-22 00:11:21 +02:00
|
|
|
/* Close the active state associated with execution of this statement */
|
|
|
|
virtual void cleanup_stmt();
|
2004-05-20 01:02:49 +02:00
|
|
|
};
|
|
|
|
|
2004-08-03 12:32:21 +02:00
|
|
|
|
2005-09-22 00:11:21 +02:00
|
|
|
class Server_side_cursor;
|
2004-08-03 12:32:21 +02:00
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the previous approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/**
|
|
|
|
@class Statement
|
|
|
|
@brief State of a single command executed against this connection.
|
|
|
|
|
2003-11-27 18:51:53 +01:00
|
|
|
One connection can contain a lot of simultaneously running statements,
|
|
|
|
some of which could be:
|
|
|
|
- prepared, that is, contain placeholders,
|
|
|
|
- opened as cursors. We maintain 1 to 1 relationship between
|
|
|
|
statement and cursor - if user wants to create another cursor for his
|
|
|
|
query, we create another statement for it.
|
|
|
|
To perform some action with statement we reset THD part to the state of
|
|
|
|
that statement, do the action, and then save back modified state from THD
|
|
|
|
to the statement. It will be changed in near future, and Statement will
|
|
|
|
be used explicitly.
|
|
|
|
*/
|
|
|
|
|
2005-07-19 20:21:12 +02:00
|
|
|
class Statement: public ilink, public Query_arena
|
2003-11-27 18:51:53 +01:00
|
|
|
{
|
2003-12-20 00:16:10 +01:00
|
|
|
Statement(const Statement &rhs); /* not implemented: */
|
|
|
|
Statement &operator=(const Statement &rhs); /* non-copyable */
|
2003-11-27 18:51:53 +01:00
|
|
|
public:
|
|
|
|
/*
|
2003-12-04 20:08:26 +01:00
|
|
|
Uniquely identifies each statement object in thread scope; change during
|
2003-12-20 00:16:10 +01:00
|
|
|
statement lifetime. FIXME: must be const
|
2003-11-27 18:51:53 +01:00
|
|
|
*/
|
2003-12-04 20:08:26 +01:00
|
|
|
ulong id;
|
2003-11-27 18:51:53 +01:00
|
|
|
|
|
|
|
/*
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the momement
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it neads in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplices some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to loose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparision with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this throughly).
Lars has promosed to do this.
2006-06-04 17:52:22 +02:00
|
|
|
MARK_COLUMNS_NONE: Means mark_used_colums is not set and no indicator to
|
|
|
|
handler of fields used is set
|
|
|
|
MARK_COLUMNS_READ: Means a bit in read set is set to inform handler
|
|
|
|
that the field is to be read. If field list contains
|
|
|
|
duplicates, then thd->dup_field is set to point
|
|
|
|
to the last found duplicate.
|
|
|
|
MARK_COLUMNS_WRITE: Means a bit is set in write set to inform handler
|
|
|
|
that it needs to update this field in write_row
|
|
|
|
and update_row.
|
2003-11-27 18:51:53 +01:00
|
|
|
*/
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For not DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away be the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the momement
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it neads in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplices some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to loose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparision with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this throughly).
Lars has promosed to do this.
2006-06-04 17:52:22 +02:00
|
|
|
  /*
    NOTE(review): presumably controls how columns are marked as used during
    name resolution (see enum_mark_columns) — exact semantics not visible
    in this chunk; confirm against the enum's declaration.
  */
  enum enum_mark_columns mark_used_columns;

  LEX_STRING name; /* name for named prepared statements */

  LEX *lex;                                     // parse tree descriptor

  /*
    Points to the query associated with this statement. It's const, but
    we need to declare it char * because all table handlers are written
    in C and need to point to it.

    Note that (A) if we set query = NULL, we must at the same time set
    query_length = 0, and protect the whole operation with the
    LOCK_thread_count mutex. And (B) we are ONLY allowed to set query to a
    non-NULL value if its previous value is NULL. We do not need to protect
    operation (B) with any mutex. To avoid crashes in races, if we do not
    know that thd->query cannot change at the moment, one should print
    thd->query like this:
      (1) reserve the LOCK_thread_count mutex;
      (2) check if thd->query is NULL;
      (3) if not NULL, then print at most thd->query_length characters from
          it. We will see the query_length field as either 0, or the right
          value for it.
    Assuming that the write and read of an n-bit memory field in an n-bit
    computer is atomic, we can avoid races in the above way.
    This printing is needed at least in SHOW PROCESSLIST and SHOW INNODB
    STATUS.
  */
  char *query;
  uint32 query_length;                          // current query length

  /* Associated server-side cursor, when one exists for this statement. */
  Server_side_cursor *cursor;

public:

  /* This constructor is called for backup statements */
  Statement() {}

  Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg,
            enum enum_state state_arg, ulong id_arg);
  virtual ~Statement();

  /* Assign execution context (note: not all members) of given stmt to self */
  void set_statement(Statement *stmt);
  void set_n_backup_statement(Statement *stmt, Statement *backup);
  void restore_backup_statement(Statement *stmt, Statement *backup);
  /* return class type */
  virtual Type type() const;
};
|
|
|
|
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the previous approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/**
|
2004-04-12 23:58:48 +02:00
|
|
|
Container for all statements created/used in a connection.
|
|
|
|
Statements in Statement_map have unique Statement::id (guaranteed by id
|
|
|
|
assignment in Statement::Statement)
|
|
|
|
Non-empty statement names are unique too: attempt to insert a new statement
|
|
|
|
with duplicate name causes older statement to be deleted
|
2004-09-22 13:50:07 +02:00
|
|
|
|
2004-04-12 23:58:48 +02:00
|
|
|
Statements are auto-deleted when they are removed from the map and when the
|
|
|
|
map is deleted.
|
2003-11-27 18:51:53 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
class Statement_map
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
Statement_map();
|
2004-09-22 13:50:07 +02:00
|
|
|
|
2006-04-07 21:37:06 +02:00
|
|
|
int insert(THD *thd, Statement *statement);
|
2004-04-12 23:58:48 +02:00
|
|
|
|
|
|
|
Statement *find_by_name(LEX_STRING *name)
|
2003-11-27 18:51:53 +01:00
|
|
|
{
|
2004-04-12 23:58:48 +02:00
|
|
|
Statement *stmt;
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
stmt= (Statement*)hash_search(&names_hash, (uchar*)name->str,
|
2004-04-12 23:58:48 +02:00
|
|
|
name->length);
|
|
|
|
return stmt;
|
2003-11-27 18:51:53 +01:00
|
|
|
}
|
2003-12-20 00:16:10 +01:00
|
|
|
|
|
|
|
Statement *find(ulong id)
|
2003-11-27 18:51:53 +01:00
|
|
|
{
|
2003-12-20 00:16:10 +01:00
|
|
|
if (last_found_statement == 0 || id != last_found_statement->id)
|
2004-05-21 02:27:50 +02:00
|
|
|
{
|
|
|
|
Statement *stmt;
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
stmt= (Statement *) hash_search(&st_hash, (uchar *) &id, sizeof(id));
|
2004-06-22 09:04:41 +02:00
|
|
|
if (stmt && stmt->name.str)
|
2004-05-21 02:27:50 +02:00
|
|
|
return NULL;
|
|
|
|
last_found_statement= stmt;
|
|
|
|
}
|
2003-12-20 00:16:10 +01:00
|
|
|
return last_found_statement;
|
2003-11-27 18:51:53 +01:00
|
|
|
}
|
2005-07-19 20:21:12 +02:00
|
|
|
/*
|
|
|
|
Close all cursors of this connection that use tables of a storage
|
|
|
|
engine that has transaction-specific state and therefore can not
|
|
|
|
survive COMMIT or ROLLBACK. Currently all but MyISAM cursors are closed.
|
|
|
|
*/
|
|
|
|
void close_transient_cursors();
|
2006-04-07 21:37:06 +02:00
|
|
|
void erase(Statement *statement);
|
2004-09-22 13:50:07 +02:00
|
|
|
/* Erase all statements (calls Statement destructor) */
|
2006-04-07 21:37:06 +02:00
|
|
|
void reset();
|
|
|
|
~Statement_map();
|
2003-11-27 18:51:53 +01:00
|
|
|
private:
|
|
|
|
HASH st_hash;
|
2004-04-12 23:58:48 +02:00
|
|
|
HASH names_hash;
|
2005-07-19 20:21:12 +02:00
|
|
|
I_List<Statement> transient_cursor_list;
|
2003-12-20 00:16:10 +01:00
|
|
|
Statement *last_found_statement;
|
2003-11-27 18:51:53 +01:00
|
|
|
};
|
|
|
|
|
2005-01-16 13:16:23 +01:00
|
|
|
/*
  One entry of the savepoint list; entries are linked through 'prev'.
*/
struct st_savepoint {
  struct st_savepoint *prev;  /* next older savepoint; NULL for the first */
  char                *name;  /* savepoint name */
  /*
    'length' presumably the length of 'name'; the meaning of 'nht' is not
    visible in this chunk (number of handlertons?) — TODO confirm at the
    use sites.
  */
  uint length, nht;
};
|
|
|
|
|
|
|
|
/* States of an XA transaction. */
enum xa_states {XA_NOTR=0, XA_ACTIVE, XA_IDLE, XA_PREPARED};
/* Printable names for the xa_states values. */
extern const char *xa_state_names[];

typedef struct st_xid_state {
  /* For now, this is only used to catch duplicated external xids */
  XID xid;                           // transaction identifier
  enum xa_states xa_state;           // used by external XA only
  bool in_thd;                       // NOTE(review): meaning not visible here — confirm
} XID_STATE;

/*
  Global cache of XID_STATE entries.  NOTE(review): LOCK_xid_cache
  presumably guards xid_cache — confirm at the definition site.
*/
extern pthread_mutex_t LOCK_xid_cache;
extern HASH xid_cache;
bool xid_cache_init(void);
void xid_cache_free(void);
XID_STATE *xid_cache_search(XID *xid);
/* Two insert overloads: from a raw XID + state, or a ready XID_STATE. */
bool xid_cache_insert(XID *xid, enum xa_states xa_state);
bool xid_cache_insert(XID_STATE *xid_state);
void xid_cache_delete(XID_STATE *xid_state);
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the previous approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/**
|
|
|
|
@class Security_context
|
|
|
|
@brief A set of THD members describing the current authenticated user.
|
|
|
|
*/
|
2005-09-15 21:29:07 +02:00
|
|
|
|
2005-09-20 20:20:38 +02:00
|
|
|
class Security_context {
|
|
|
|
public:
|
2006-02-25 16:46:30 +01:00
|
|
|
Security_context() {} /* Remove gcc warning */
|
2005-09-15 21:29:07 +02:00
|
|
|
/*
|
|
|
|
host - host of the client
|
|
|
|
user - user of the client, set to NULL until the user has been read from
|
|
|
|
the connection
|
2005-09-20 20:20:38 +02:00
|
|
|
priv_user - The user privilege we are using. May be "" for anonymous user.
|
2005-09-15 21:29:07 +02:00
|
|
|
ip - client IP
|
|
|
|
*/
|
|
|
|
char *host, *user, *priv_user, *ip;
|
2005-09-20 20:20:38 +02:00
|
|
|
/* The host privilege we are using */
|
2005-09-15 21:29:07 +02:00
|
|
|
char priv_host[MAX_HOSTNAME];
|
|
|
|
/* points to host if host is available, otherwise points to ip */
|
|
|
|
const char *host_or_ip;
|
|
|
|
ulong master_access; /* Global privileges from mysql.user */
|
|
|
|
ulong db_access; /* Privileges for current db */
|
|
|
|
|
|
|
|
void init();
|
|
|
|
void destroy();
|
|
|
|
void skip_grants();
|
2005-09-20 20:20:38 +02:00
|
|
|
inline char *priv_host_name()
|
2005-09-15 21:29:07 +02:00
|
|
|
{
|
|
|
|
return (*priv_host ? priv_host : (char *)"%");
|
|
|
|
}
|
2006-05-22 20:46:13 +02:00
|
|
|
|
|
|
|
bool set_user(char *user_arg);
|
2007-04-13 22:35:56 +02:00
|
|
|
|
|
|
|
#ifndef NO_EMBEDDED_ACCESS_CHECKS
|
|
|
|
bool
|
|
|
|
change_security_context(THD *thd,
|
|
|
|
LEX_STRING *definer_user,
|
|
|
|
LEX_STRING *definer_host,
|
|
|
|
LEX_STRING *db,
|
|
|
|
Security_context **backup);
|
|
|
|
|
|
|
|
void
|
|
|
|
restore_security_context(THD *thd, Security_context *backup);
|
|
|
|
#endif
|
2005-09-15 21:29:07 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the previous approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/**
  A registry for item tree transformations performed during
  query optimization. We register only those changes which require
  a rollback to re-execute a prepared statement or stored procedure
  yet another time.
*/

/* Opaque to users of the list; the definition is not visible in this file. */
struct Item_change_record;
typedef I_List<Item_change_record> Item_change_list;
|
|
|
|
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the previous approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/**
  Type of prelocked mode.

  See the comment for THD::prelocked_mode for the complete
  description of what prelocked mode is and when each value is used.
*/

enum prelocked_mode_type
{
  NON_PRELOCKED= 0,               /* prelocked mode is off */
  PRELOCKED= 1,                   /* prelocked mode is on */
  PRELOCKED_UNDER_LOCK_TABLES= 2  /* prelocked while under explicit LOCK TABLES */
};
|
|
|
|
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the previous approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/**
  Class that holds information about tables which were opened and locked
  by the thread. It is also used to save/restore this information in
  push_open_tables_state()/pop_open_tables_state().
*/

class Open_tables_state
{
public:
  /*
    open_tables      - list of regular tables in use by this thread
    temporary_tables - list of temp tables in use by this thread
    handler_tables   - list of tables that were opened with HANDLER OPEN
                       and are still in use by this thread
  */
  TABLE *open_tables, *temporary_tables, *handler_tables, *derived_tables;
  /*
    During a MySQL session, one can lock tables in two modes: automatic
    or manual. In automatic mode all necessary tables are locked just before
    statement execution, and all acquired locks are stored in 'lock'
    member. Unlocking takes place automatically as well, when the
    statement ends.
    Manual mode comes into play when a user issues a 'LOCK TABLES'
    statement. In this mode the user can only use the locked tables.
    Trying to use any other tables will give an error. The locked tables are
    stored in 'locked_tables' member. Manual locking is described in
    the 'LOCK TABLES' chapter of the MySQL manual.
    See also lock_tables() for details.
  */
  MYSQL_LOCK *lock;
  /*
    Tables that were locked with explicit or implicit LOCK TABLES.
    (Implicit LOCK TABLES happens when we are prelocking tables for
    execution of statement which uses stored routines. See description
    of THD::prelocked_mode for more info.)
  */
  MYSQL_LOCK *locked_tables;

  /*
    CREATE-SELECT keeps an extra lock for the table being
    created. This field is used to keep the extra lock available for
    lower level routines, which would otherwise miss that lock.
  */
  MYSQL_LOCK *extra_lock;

  /*
    Indicates whether "prelocked mode" is on, and which type of
    "prelocked mode" it is (see prelocked_mode_type).

    Prelocked mode is used for execution of queries which explicitly
    or implicitly (via views or triggers) use functions, and thus may need
    some additional tables (mentioned in the query table list) for their
    execution.

    The first open_tables() call for such a query will analyse all functions
    used by it and add all additional tables to its table list. It will
    also mark the query as requiring prelocking. After that lock_tables()
    will issue an implicit LOCK TABLES for the whole table list and change
    thd::prelocked_mode to non-0. All queries called in functions invoked
    by the main query will use the prelocked tables. A non-0 prelocked_mode
    will also suppress the mentioned analysis in those queries, thus saving
    cycles. Prelocked mode is turned off once close_thread_tables()
    for the main query is called.

    Note: Since not all "tables" present in the table list are really locked,
    thd::prelocked_mode does not imply thd::locked_tables.
  */
  prelocked_mode_type prelocked_mode;
  /* Table-definition version snapshot (refresh_version) for this state. */
  ulong version;
  uint current_tablenr;

  enum enum_flags {
    BACKUPS_AVAIL = (1U << 0)     /* There are backups available */
  };

  /*
    Flags with information about the open tables state.
  */
  uint state_flags;

  /*
    This constructor serves for creation of Open_tables_state instances
    which are used as backup storage.
  */
  Open_tables_state() : state_flags(0U) { }

  /* Defined elsewhere; initializes the state for the given version. */
  Open_tables_state(ulong version_arg);

  /* Copy the whole state wholesale from 'state' (used by pop/restore). */
  void set_open_tables_state(Open_tables_state *state)
  {
    *this= *state;
  }

  /* Reset all members to the "nothing open, nothing locked" state. */
  void reset_open_tables_state()
  {
    open_tables= temporary_tables= handler_tables= derived_tables= 0;
    extra_lock= lock= locked_tables= 0;
    prelocked_mode= NON_PRELOCKED;
    state_flags= 0U;
  }
};
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the previous approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/**
  @class Sub_statement_state
  @brief Used to save context when executing a function or trigger
*/

/*
  Values used for Sub_statement_state::in_sub_stmt. They are distinct
  bits, telling what kind of sub-statement context is active.
*/

#define SUB_STMT_TRIGGER 1
#define SUB_STMT_FUNCTION 2
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the prevoius approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
|
2005-08-15 17:15:12 +02:00
|
|
|
/*
  Slice of THD state saved before entering a sub-statement (stored
  function or trigger body) and restored when it finishes, so the
  sub-statement does not clobber the caller's statement context.
*/
class Sub_statement_state
{
public:
  /* Thread option bits of the enclosing statement. */
  ulonglong options;
  /*
    Saved auto_increment / LAST_INSERT_ID bookkeeping — presumably
    mirrors the same-named THD members; verify against THD.
  */
  ulonglong first_successful_insert_id_in_prev_stmt;
  ulonglong first_successful_insert_id_in_cur_stmt, insert_id_for_cur_row;
  Discrete_interval auto_inc_interval_for_cur_row;
  /* Row counters of the enclosing statement. */
  ulonglong limit_found_rows;
  ha_rows cuted_fields, sent_row_count, examined_row_count;
  ulong client_capabilities;
  /* Non-zero while inside a sub-statement; see SUB_STMT_* defines. */
  uint in_sub_stmt;
  bool enable_slow_log;
  bool last_insert_id_used;
  my_bool no_send_ok;
  /* Head of the savepoint list of the enclosing statement. */
  SAVEPOINT *savepoints;
};
|
|
|
|
|
|
|
|
|
2006-05-22 20:46:13 +02:00
|
|
|
/*
  Flags for the THD::system_thread variable. NON_SYSTEM_THREAD marks an
  ordinary client connection; the remaining values are distinct bits,
  one per kind of internal server thread.
*/
enum enum_thread_type
{
  NON_SYSTEM_THREAD= 0,                  /* regular client connection */
  SYSTEM_THREAD_DELAYED_INSERT= 1,       /* INSERT DELAYED handler thread */
  SYSTEM_THREAD_SLAVE_IO= 2,             /* replication slave I/O thread */
  SYSTEM_THREAD_SLAVE_SQL= 4,            /* replication slave SQL thread */
  SYSTEM_THREAD_NDBCLUSTER_BINLOG= 8,    /* NDB cluster binlog thread */
  SYSTEM_THREAD_EVENT_SCHEDULER= 16,     /* event scheduler thread */
  SYSTEM_THREAD_EVENT_WORKER= 32         /* event worker thread */
};
|
|
|
|
|
|
|
|
|
2007-03-06 21:46:33 +01:00
|
|
|
/**
  This class represents the interface for internal error handlers.
  Internal error handlers are exception handlers used by the server
  implementation.
*/
class Internal_error_handler
{
protected:
  Internal_error_handler() {}
  virtual ~Internal_error_handler() {}

public:
  /**
    Handle an error condition.
    This method can be implemented by a subclass to achieve any of the
    following:
    - mask an error internally, prevent exposing it to the user,
    - mask an error and throw another one instead.
    When this method returns true, the error condition is considered
    'handled', and will not be propagated to upper layers.
    It is the responsibility of the code installing an internal handler
    to then check for trapped conditions, and implement logic to recover
    from the anticipated conditions trapped during runtime.

    This mechanism is similar to C++ try/throw/catch:
    - 'try' corresponds to <code>THD::push_internal_handler()</code>,
    - 'throw' corresponds to <code>my_error()</code>,
    which invokes <code>my_message_sql()</code>,
    - 'catch' corresponds to checking how/if an internal handler was invoked,
    before removing it from the exception stack with
    <code>THD::pop_internal_handler()</code>.

    @param sql_errno the error number
    @param level the error level
    @param thd the calling thread
    @return true if the error is handled
  */
  virtual bool handle_error(uint sql_errno,
                            MYSQL_ERROR::enum_warning_level level,
                            THD *thd) = 0;
};
|
|
|
|
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the previous approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/**
|
|
|
|
@class THD
|
2002-07-23 17:31:22 +02:00
|
|
|
For each client connection we create a separate thread with THD serving as
|
|
|
|
a thread/connection descriptor
|
|
|
|
*/
|
2002-01-20 03:16:52 +01:00
|
|
|
|
2005-07-19 20:21:12 +02:00
|
|
|
class THD :public Statement,
|
2005-07-13 11:48:13 +02:00
|
|
|
public Open_tables_state
|
2003-01-28 07:38:28 +01:00
|
|
|
{
|
2000-07-31 21:29:14 +02:00
|
|
|
public:
|
2005-12-22 06:39:02 +01:00
|
|
|
/* Used to execute base64 coded binlog events in MySQL server */
|
|
|
|
RELAY_LOG_INFO* rli_fake;
|
|
|
|
|
2005-10-25 11:02:48 +02:00
|
|
|
/*
|
|
|
|
Constant for THD::where initialization in the beginning of every query.
|
|
|
|
|
|
|
|
It's needed because we do not save/restore THD::where normally during
|
|
|
|
primary (non subselect) query execution.
|
|
|
|
*/
|
|
|
|
static const char * const DEFAULT_WHERE;
|
|
|
|
|
2002-12-16 14:33:29 +01:00
|
|
|
#ifdef EMBEDDED_LIBRARY
|
|
|
|
struct st_mysql *mysql;
|
2003-09-16 13:06:25 +02:00
|
|
|
unsigned long client_stmt_id;
|
|
|
|
unsigned long client_param_count;
|
2003-09-17 17:48:53 +02:00
|
|
|
struct st_mysql_bind *client_params;
|
2003-10-06 13:32:38 +02:00
|
|
|
char *extra_data;
|
|
|
|
ulong extra_length;
|
2006-01-04 11:20:28 +01:00
|
|
|
struct st_mysql_data *cur_data;
|
|
|
|
struct st_mysql_data *first_data;
|
|
|
|
struct st_mysql_data **data_tail;
|
|
|
|
void clear_data_list();
|
|
|
|
struct st_mysql_data *alloc_new_dataset();
|
2006-10-24 14:19:02 +02:00
|
|
|
/*
|
|
|
|
In embedded server it points to the statement that is processed
|
|
|
|
in the current query. We store some results directly in statement
|
|
|
|
fields then.
|
|
|
|
*/
|
|
|
|
struct st_mysql_stmt *current_stmt;
|
2002-12-16 14:33:29 +01:00
|
|
|
#endif
|
2002-07-23 17:31:22 +02:00
|
|
|
NET net; // client connection descriptor
|
2002-10-02 12:33:08 +02:00
|
|
|
MEM_ROOT warn_root; // For warnings and errors
|
2002-12-11 08:17:51 +01:00
|
|
|
Protocol *protocol; // Current protocol
|
2007-01-30 22:48:05 +01:00
|
|
|
Protocol_text protocol_text; // Normal protocol
|
|
|
|
Protocol_binary protocol_binary; // Binary protocol
|
2002-07-23 17:31:22 +02:00
|
|
|
HASH user_vars; // hash for user variables
|
|
|
|
String packet; // dynamic buffer for network I/O
|
2004-05-25 00:03:49 +02:00
|
|
|
String convert_buffer; // buffer for charset conversions
|
2002-07-23 17:31:22 +02:00
|
|
|
struct sockaddr_in remote; // client socket address
|
|
|
|
struct rand_struct rand; // used for authentication
|
|
|
|
struct system_variables variables; // Changeable local variables
|
2004-09-13 15:48:01 +02:00
|
|
|
struct system_status_var status_var; // Per thread statistic vars
|
2006-06-20 12:20:32 +02:00
|
|
|
struct system_status_var *initial_status_var; /* used by show status */
|
2005-07-19 20:21:12 +02:00
|
|
|
THR_LOCK_INFO lock_info; // Locking info of this thread
|
|
|
|
THR_LOCK_OWNER main_lock_id; // To use for conventional queries
|
|
|
|
THR_LOCK_OWNER *lock_id; // If not main_lock_id, points to
|
|
|
|
// the lock_id of a cursor.
|
2002-08-22 15:50:58 +02:00
|
|
|
pthread_mutex_t LOCK_delete; // Locked before thd is deleted
|
2003-12-20 00:16:10 +01:00
|
|
|
/* all prepared statements and cursors of this connection */
|
2005-08-12 21:15:01 +02:00
|
|
|
Statement_map stmt_map;
|
2002-07-23 17:31:22 +02:00
|
|
|
/*
|
|
|
|
A pointer to the stack frame of handle_one_connection(),
|
|
|
|
which is called first in the thread for handling a client
|
|
|
|
*/
|
|
|
|
char *thread_stack;
|
|
|
|
|
2002-01-20 03:16:52 +01:00
|
|
|
/*
|
|
|
|
db - currently selected database
|
This will be pushed only after I fix the testsuite.
This is the main commit for Worklog tasks:
* A more dynamic binlog format which allows small changes (1064)
* Log session variables in Query_log_event (1063)
Below 5.0 means 5.0.0.
MySQL 5.0 is able to replicate FOREIGN_KEY_CHECKS, UNIQUE_KEY_CHECKS (for speed),
SQL_AUTO_IS_NULL, SQL_MODE. Not charsets (WL#1062), not some vars (I can only think
of SQL_SELECT_LIMIT, which deserves a special treatment). Note that this
works for queries, except LOAD DATA INFILE (for this it would have to wait
for Dmitri's push of WL#874, which in turns waits for the present push, so...
the deadlock must be broken!). Note that when Dmitri pushes WL#874 in 5.0.1,
5.0.0 won't be able to replicate a LOAD DATA INFILE from 5.0.1.
Apart from that, the new binlog format is designed so that it can tolerate
a little variation in the events (so that a 5.0.0 slave could replicate a
5.0.1 master, except for LOAD DATA INFILE unfortunately); that is, when I
later add replication of charsets it should break nothing. And when I later
add a UID to every event, it should break nothing.
The main change brought by this patch is a new type of event, Format_description_log_event,
which describes some lengthes in other event types. This event is needed for
the master/slave/mysqlbinlog to understand a 5.0 log. Thanks to this event,
we can later add more bytes to the header of every event without breaking compatibility.
Inside Query_log_event, we have some additional dynamic format, as every Query_log_event
can have a different number of status variables, stored as pairs (code, value); that's
how SQL_MODE and session variables and catalog are stored. Like this, we can later
add count of affected rows, charsets... and we can have options --don't-log-count-affected-rows
if we want.
MySQL 5.0 is able to run on 4.x relay logs, 4.x binlogs.
Upgrading a 4.x master to 5.0 is ok (no need to delete binlogs),
upgrading a 4.x slave to 5.0 is ok (no need to delete relay logs);
so both can be "hot" upgrades.
Upgrading a 3.23 master to 5.0 requires as much as upgrading it to 4.0.
3.23 and 4.x can't be slaves of 5.0.
So downgrading from 5.0 to 4.x may be complicated.
Log_event::log_pos is now the position of the end of the event, which is
more useful than the position of the beginning. We take care about compatibility
with <5.0 (in which log_pos is the beginning).
I added a short test for replication of SQL_MODE and some other variables.
TODO:
- after committing this, merge the latest 5.0 into it
- fix all tests
- update the manual with upgrade notes.
2003-12-18 01:09:05 +01:00
|
|
|
catalog - currently selected catalog
|
2003-12-19 22:40:23 +01:00
|
|
|
WARNING: some members of THD (currently 'db', 'catalog' and 'query') are
|
|
|
|
set and alloced by the slave SQL thread (for the THD of that thread); that
|
|
|
|
thread is (and must remain, for now) the only responsible for freeing these
|
|
|
|
3 members. If you add members here, and you add code to set them in
|
|
|
|
replication, don't forget to free_them_and_set_them_to_0 in replication
|
2005-10-08 16:39:55 +02:00
|
|
|
properly. For details see the 'err:' label of the handle_slave_sql()
|
|
|
|
in sql/slave.cc.
|
2002-01-20 03:16:52 +01:00
|
|
|
*/
|
2005-09-15 21:29:07 +02:00
|
|
|
char *db, *catalog;
|
2005-09-20 20:20:38 +02:00
|
|
|
Security_context main_security_ctx;
|
|
|
|
Security_context *security_ctx;
|
2005-09-15 21:29:07 +02:00
|
|
|
|
2003-04-02 15:16:19 +02:00
|
|
|
/* remote (peer) port */
|
|
|
|
uint16 peer_port;
|
2003-10-30 19:30:20 +01:00
|
|
|
/*
|
|
|
|
Points to info-string that we show in SHOW PROCESSLIST
|
|
|
|
You are supposed to update thd->proc_info only if you have coded
|
|
|
|
a time-consuming piece that MySQL can get stuck in for a long time.
|
|
|
|
*/
|
2002-06-12 14:04:18 +02:00
|
|
|
const char *proc_info;
|
2003-04-29 00:15:18 +02:00
|
|
|
|
2003-01-18 15:39:21 +01:00
|
|
|
ulong client_capabilities; /* What the client supports */
|
2003-01-04 14:17:16 +01:00
|
|
|
ulong max_client_packet_length;
|
2002-01-20 03:16:52 +01:00
|
|
|
|
2004-10-06 18:14:33 +02:00
|
|
|
HASH handler_tables_hash;
|
2003-12-04 17:12:01 +01:00
|
|
|
/*
|
|
|
|
One thread can hold up to one named user-level lock. This variable
|
|
|
|
points to a lock object if the lock is present. See item_func.cc and
|
|
|
|
chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK.
|
|
|
|
*/
|
2004-03-15 20:39:36 +01:00
|
|
|
User_level_lock *ull;
|
2002-03-30 20:36:05 +01:00
|
|
|
#ifndef DBUG_OFF
|
|
|
|
uint dbug_sentry; // watch out for memory corruption
|
2003-11-14 13:50:19 +01:00
|
|
|
#endif
|
2000-07-31 21:29:14 +02:00
|
|
|
struct st_my_thread_var *mysys_var;
|
2004-03-02 20:39:50 +01:00
|
|
|
/*
|
2005-06-17 21:26:25 +02:00
|
|
|
Type of current query: COM_STMT_PREPARE, COM_QUERY, etc. Set from
|
2004-03-02 20:39:50 +01:00
|
|
|
first byte of the packet in do_command()
|
|
|
|
*/
|
|
|
|
enum enum_server_command command;
|
2002-07-23 17:31:22 +02:00
|
|
|
uint32 server_id;
|
2001-12-05 12:03:00 +01:00
|
|
|
uint32 file_id; // for LOAD DATA INFILE
|
2003-10-30 19:30:20 +01:00
|
|
|
/*
|
|
|
|
Used in error messages to tell user in what part of MySQL we found an
|
|
|
|
error. E. g. when where= "having clause", if fix_fields() fails, user
|
|
|
|
will know that the error was in having clause.
|
|
|
|
*/
|
2000-07-31 21:29:14 +02:00
|
|
|
const char *where;
|
2001-12-05 12:03:00 +01:00
|
|
|
time_t start_time,time_after_lock,user_time;
|
|
|
|
time_t connect_time,thr_create_time; // track down slow pthread_create
|
2000-07-31 21:29:14 +02:00
|
|
|
thr_lock_type update_lock_default;
|
2007-05-10 16:27:36 +02:00
|
|
|
Delayed_insert *di;
|
2005-08-12 21:15:01 +02:00
|
|
|
|
2005-08-15 17:15:12 +02:00
|
|
|
/* <> 0 if we are inside of trigger or stored function. */
|
|
|
|
uint in_sub_stmt;
|
2005-08-12 21:15:01 +02:00
|
|
|
|
2005-01-16 13:16:23 +01:00
|
|
|
/* container for handler's private per-connection data */
|
|
|
|
void *ha_data[MAX_HA];
|
2005-12-22 06:39:02 +01:00
|
|
|
|
|
|
|
#ifndef MYSQL_CLIENT
|
2006-05-16 11:16:23 +02:00
|
|
|
int binlog_setup_trx_data();
|
|
|
|
|
2005-12-22 06:39:02 +01:00
|
|
|
/*
|
2006-02-16 08:30:53 +01:00
|
|
|
Public interface to write RBR events to the binlog
|
2005-12-22 06:39:02 +01:00
|
|
|
*/
|
2006-10-05 10:46:14 +02:00
|
|
|
void binlog_start_trans_and_stmt();
|
BUG#22864 (Rollback following CREATE... SELECT discards 'CREATE TABLE'
from log):
When row-based logging is used, the CREATE-SELECT is written as two
parts: as a CREATE TABLE statement and as the rows for the table. For
both transactional and non-transactional tables, the CREATE TABLE
statement was written to the transaction cache, as were the rows, and
on statement end, the entire transaction cache was written to the binary
log if the table was non-transactional. For transactional tables, the
events were kept in the transaction cache until end of transaction (or
statement that were not part of a transaction).
For the case when AUTOCOMMIT=0 and we are creating a transactional table
using a create select, we would then keep the CREATE TABLE statement and
the rows for the CREATE-SELECT, while executing the following statements.
On a rollback, the transaction cache would then be cleared, which would
also remove the CREATE TABLE statement. Hence no table would be created
on the slave, while there is an empty table on the master.
This relates to BUG#22865 where the table being created exists on the
master, but not on the slave during insertion of rows into the newly
created table. This occurs since the CREATE TABLE statement were still
in the transaction cache until the statement finished executing, and
possibly longer if the table was transactional.
This patch changes the behaviour of the CREATE-SELECT statement by
adding an implicit commit at the end of the statement when creating
non-temporary tables. Hence, non-temporary tables will be written to the
binary log on completion, and in the even of AUTOCOMMIT=0, a new
transaction will be started. Temporary tables do not commit an ongoing
transaction: neither as a pre- not a post-commit.
The events for both transactional and non-transactional tables are
saved in the transaction cache, and written to the binary log at end
of the statement.
2006-12-21 09:29:02 +01:00
|
|
|
int binlog_flush_transaction_cache();
|
|
|
|
void binlog_set_stmt_begin();
|
2006-02-16 08:30:53 +01:00
|
|
|
int binlog_write_table_map(TABLE *table, bool is_transactional);
|
2005-12-22 06:39:02 +01:00
|
|
|
int binlog_write_row(TABLE* table, bool is_transactional,
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
MY_BITMAP const* cols, size_t colcnt,
|
|
|
|
const uchar *buf);
|
2005-12-22 06:39:02 +01:00
|
|
|
int binlog_delete_row(TABLE* table, bool is_transactional,
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
MY_BITMAP const* cols, size_t colcnt,
|
|
|
|
const uchar *buf);
|
2005-12-22 06:39:02 +01:00
|
|
|
int binlog_update_row(TABLE* table, bool is_transactional,
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
MY_BITMAP const* cols, size_t colcnt,
|
|
|
|
const uchar *old_data, const uchar *new_data);
|
2005-12-22 06:39:02 +01:00
|
|
|
|
|
|
|
void set_server_id(uint32 sid) { server_id = sid; }
|
|
|
|
|
|
|
|
/*
|
|
|
|
Member functions to handle pending event for row-level logging.
|
|
|
|
*/
|
|
|
|
template <class RowsEventT> Rows_log_event*
|
|
|
|
binlog_prepare_pending_rows_event(TABLE* table, uint32 serv_id,
|
|
|
|
MY_BITMAP const* cols,
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
size_t colcnt,
|
|
|
|
size_t needed,
|
2006-01-09 15:59:39 +01:00
|
|
|
bool is_transactional,
|
|
|
|
RowsEventT* hint);
|
2005-12-22 06:39:02 +01:00
|
|
|
Rows_log_event* binlog_get_pending_rows_event() const;
|
|
|
|
void binlog_set_pending_rows_event(Rows_log_event* ev);
|
|
|
|
int binlog_flush_pending_rows_event(bool stmt_end);
|
|
|
|
void binlog_delete_pending_rows_event();
|
|
|
|
|
2006-02-16 08:30:53 +01:00
|
|
|
private:
|
|
|
|
uint binlog_table_maps; // Number of table maps currently in the binlog
|
2007-05-14 14:45:38 +02:00
|
|
|
|
|
|
|
enum enum_binlog_flag {
|
|
|
|
BINLOG_FLAG_UNSAFE_STMT_PRINTED,
|
|
|
|
BINLOG_FLAG_COUNT
|
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
Flags with per-thread information regarding the status of the
|
|
|
|
binary log.
|
|
|
|
*/
|
|
|
|
uint32 binlog_flags;
|
2006-02-16 08:30:53 +01:00
|
|
|
public:
|
2006-05-31 19:21:52 +02:00
|
|
|
uint get_binlog_table_maps() const {
|
|
|
|
return binlog_table_maps;
|
|
|
|
}
|
2006-05-16 11:16:23 +02:00
|
|
|
#endif /* MYSQL_CLIENT */
|
|
|
|
|
2005-12-22 06:39:02 +01:00
|
|
|
public:
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
struct st_transactions {
  /*
    Per-connection transaction context kept inside THD.  Holds the
    savepoint chain, the handler-level transaction descriptors for the
    whole transaction and for the current statement, XA state, pending
    row-based binlog event, and a transaction-lifetime memory root.
  */
  SAVEPOINT *savepoints;                // Chain of savepoints set in this transaction
  THD_TRANS all;			// Trans since BEGIN WORK
  THD_TRANS stmt;			// Trans for current statement
  bool on;                              // see ha_enable_transaction()
  XID  xid;                             // transaction identifier
  enum xa_states xa_state;              // used by external XA only
  XID_STATE xid_state;                  // XA state + XID (see log.h / xa handling)
  Rows_log_event *m_pending_rows_event; // Pending row-based binlog event, if any
  /*
    Tables changed in transaction (that must be invalidated in query cache).
    List contain only transactional tables, that not invalidated in query
    cache (instead of full list of changed in transaction tables).
  */
  CHANGED_TABLE_LIST* changed_tables;
  MEM_ROOT mem_root; // Transaction-life memory allocation pool
  /*
    Reset per-transaction state.  The memory root is emptied but its
    preallocated block is kept (MY_KEEP_PREALLOC) so the next
    transaction can reuse it without a fresh malloc().
    NOTE(review): presumably invoked at COMMIT/ROLLBACK — confirm callers.
  */
  void cleanup()
  {
    changed_tables= 0;
    savepoints= 0;
#ifdef USING_TRANSACTIONS
    free_root(&mem_root,MYF(MY_KEEP_PREALLOC));
#endif
  }
  st_transactions()
  {
#ifdef USING_TRANSACTIONS
    /*
      Zero the whole object first (valid only while all members stay
      POD-like), then initialize the members that need real setup.
    */
    bzero((char*)this, sizeof(*this));
    xid_state.xid.null();
    init_sql_alloc(&mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
#else
    /* Without transaction support only the XA state needs initializing */
    xid_state.xa_state= XA_NOTR;
#endif
  }
} transaction;
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to
statement specific cleanups.
(The only case it's not called is if force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For non-DBUG binaries, the dbug_tmp_restore_column_map() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away by the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It's only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT_root argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precisions
in remembering any hidden primary key to able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to loose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automaticly in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparision with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this throughly).
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
Field *dup_field;
|
2000-07-31 21:29:14 +02:00
|
|
|
#ifndef __WIN__
|
2007-02-23 12:13:55 +01:00
|
|
|
sigset_t signals;
|
2000-07-31 21:29:14 +02:00
|
|
|
#endif
|
2001-03-14 07:07:12 +01:00
|
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
|
|
|
|
Vio* active_vio;
|
2003-11-14 13:50:19 +01:00
|
|
|
#endif
|
2004-10-08 00:21:19 +02:00
|
|
|
/*
|
|
|
|
This is to track items changed during execution of a prepared
|
|
|
|
statement/stored procedure. It's created by
|
|
|
|
register_item_tree_change() in memory root of THD, and freed in
|
2005-07-15 11:21:08 +02:00
|
|
|
rollback_item_tree_changes(). For conventional execution it's always
|
|
|
|
empty.
|
2004-10-08 00:21:19 +02:00
|
|
|
*/
|
|
|
|
Item_change_list change_list;
|
|
|
|
|
2004-02-08 19:14:13 +01:00
|
|
|
/*
|
2005-07-15 11:21:08 +02:00
|
|
|
A permanent memory area of the statement. For conventional
|
|
|
|
execution, the parsed tree and execution runtime reside in the same
|
2005-09-02 15:21:19 +02:00
|
|
|
memory root. In this case stmt_arena points to THD. In case of
|
2005-07-15 11:21:08 +02:00
|
|
|
a prepared statement or a stored procedure statement, thd->mem_root
|
2005-09-02 15:21:19 +02:00
|
|
|
conventionally points to runtime memory, and thd->stmt_arena
|
2005-07-15 11:21:08 +02:00
|
|
|
points to the memory of the PS/SP, where the parsed tree of the
|
|
|
|
statement resides. Whenever you need to perform a permanent
|
|
|
|
transformation of a parsed tree, you should allocate new memory in
|
2005-09-02 15:21:19 +02:00
|
|
|
stmt_arena, to allow correct re-execution of PS/SP.
|
|
|
|
Note: in the parser, stmt_arena == thd, even for PS/SP.
|
2004-02-08 19:14:13 +01:00
|
|
|
*/
|
2005-09-02 15:21:19 +02:00
|
|
|
Query_arena *stmt_arena;
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
/* Tells if LAST_INSERT_ID(#) was called for the current statement */
|
|
|
|
bool arg_of_last_insert_id_function;
|
2003-06-30 12:28:36 +02:00
|
|
|
/*
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
ALL OVER THIS FILE, "insert_id" means "*automatically generated* value for
|
|
|
|
insertion into an auto_increment column".
|
2003-06-30 12:28:36 +02:00
|
|
|
*/
|
|
|
|
/*
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
This is the first autogenerated insert id which was *successfully*
|
|
|
|
inserted by the previous statement (exactly, if the previous statement
|
|
|
|
didn't successfully insert an autogenerated insert id, then it's the one
|
|
|
|
of the statement before, etc).
|
|
|
|
It can also be set by SET LAST_INSERT_ID=# or SELECT LAST_INSERT_ID(#).
|
|
|
|
It is returned by LAST_INSERT_ID().
|
2003-06-30 12:28:36 +02:00
|
|
|
*/
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
ulonglong first_successful_insert_id_in_prev_stmt;
|
2003-06-30 12:28:36 +02:00
|
|
|
/*
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
Variant of the above, used for storing in statement-based binlog. The
|
|
|
|
difference is that the one above can change as the execution of a stored
|
|
|
|
function progresses, while the one below is set once and then does not
|
|
|
|
change (which is the value which statement-based binlog needs).
|
2003-06-30 12:28:36 +02:00
|
|
|
*/
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
ulonglong first_successful_insert_id_in_prev_stmt_for_binlog;
|
|
|
|
/*
|
|
|
|
This is the first autogenerated insert id which was *successfully*
|
|
|
|
inserted by the current statement. It is maintained only to set
|
|
|
|
first_successful_insert_id_in_prev_stmt when statement ends.
|
|
|
|
*/
|
|
|
|
ulonglong first_successful_insert_id_in_cur_stmt;
|
|
|
|
/*
|
|
|
|
We follow this logic:
|
|
|
|
- when stmt starts, first_successful_insert_id_in_prev_stmt contains the
|
|
|
|
first insert id successfully inserted by the previous stmt.
|
|
|
|
- as stmt makes progress, handler::insert_id_for_cur_row changes; every
|
|
|
|
time get_auto_increment() is called, auto_inc_intervals_for_binlog is
|
|
|
|
augmented with the reserved interval (if statement-based binlogging).
|
|
|
|
- at first successful insertion of an autogenerated value,
|
|
|
|
first_successful_insert_id_in_cur_stmt is set to
|
|
|
|
handler::insert_id_for_cur_row.
|
|
|
|
- when stmt goes to binlog, auto_inc_intervals_for_binlog is
|
|
|
|
binlogged if non-empty.
|
|
|
|
- when stmt ends, first_successful_insert_id_in_prev_stmt is set to
|
|
|
|
first_successful_insert_id_in_cur_stmt.
|
|
|
|
*/
|
|
|
|
/*
|
|
|
|
stmt_depends_on_first_successful_insert_id_in_prev_stmt is set when
|
|
|
|
LAST_INSERT_ID() is used by a statement.
|
|
|
|
If it is set, first_successful_insert_id_in_prev_stmt_for_binlog will be
|
|
|
|
stored in the statement-based binlog.
|
|
|
|
This variable is CUMULATIVE along the execution of a stored function or
|
|
|
|
trigger: if one substatement sets it to 1 it will stay 1 until the
|
|
|
|
function/trigger ends, thus making sure that
|
|
|
|
first_successful_insert_id_in_prev_stmt_for_binlog does not change anymore
|
|
|
|
and is propagated to the caller for binlogging.
|
|
|
|
*/
|
|
|
|
bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
|
|
|
|
/*
|
|
|
|
List of auto_increment intervals reserved by the thread so far, for
|
|
|
|
storage in the statement-based binlog.
|
|
|
|
Note that its minimum is not first_successful_insert_id_in_cur_stmt:
|
|
|
|
assuming a table with an autoinc column, and this happens:
|
|
|
|
INSERT INTO ... VALUES(3);
|
|
|
|
SET INSERT_ID=3; INSERT IGNORE ... VALUES (NULL);
|
|
|
|
then the latter INSERT will insert no rows
|
|
|
|
(first_successful_insert_id_in_cur_stmt == 0), but storing "INSERT_ID=3"
|
|
|
|
in the binlog is still needed; the list's minimum will contain 3.
|
|
|
|
*/
|
|
|
|
Discrete_intervals_list auto_inc_intervals_in_cur_stmt_for_binlog;
|
|
|
|
/* Used by replication and SET INSERT_ID */
|
|
|
|
Discrete_intervals_list auto_inc_intervals_forced;
|
|
|
|
/*
|
|
|
|
There is BUG#19630 where statement-based replication of stored
|
|
|
|
functions/triggers with two auto_increment columns breaks.
|
|
|
|
We however ensure that it works when there is 0 or 1 auto_increment
|
|
|
|
column; our rules are
|
|
|
|
a) on master, while executing a top statement involving substatements,
|
|
|
|
first top- or sub- statement to generate auto_increment values wins the
|
2006-07-10 18:41:03 +02:00
|
|
|
exclusive right to see its values be written to binlog (the write
|
|
|
|
will be done by the statement or its caller), and the losers won't see
|
|
|
|
their values be written to binlog.
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
b) on slave, while replicating a top statement involving substatements,
|
|
|
|
first top- or sub- statement to need to read auto_increment values from
|
|
|
|
the master's binlog wins the exclusive right to read them (so the losers
|
|
|
|
won't read their values from binlog but instead generate on their own).
|
|
|
|
a) implies that we mustn't backup/restore
|
|
|
|
auto_inc_intervals_in_cur_stmt_for_binlog.
|
|
|
|
b) implies that we mustn't backup/restore auto_inc_intervals_forced.
|
|
|
|
|
|
|
|
If there are more than 1 auto_increment columns, then intervals for
|
|
|
|
different columns may mix into the
|
|
|
|
auto_inc_intervals_in_cur_stmt_for_binlog list, which is logically wrong,
|
|
|
|
but there is no point in preventing this mixing by preventing intervals
|
|
|
|
from the secondly inserted column to come into the list, as such
|
|
|
|
prevention would be wrong too.
|
|
|
|
What will happen in the case of
|
|
|
|
INSERT INTO t1 (auto_inc) VALUES(NULL);
|
|
|
|
where t1 has a trigger which inserts into an auto_inc column of t2, is
|
|
|
|
that in binlog we'll store the interval of t1 and the interval of t2 (when
|
|
|
|
we store intervals, soon), then in slave, t1 will use both intervals, t2
|
|
|
|
will use none; if t1 inserts the same number of rows as on master,
|
|
|
|
normally the 2nd interval will not be used by t1, which is fine. t2's
|
|
|
|
values will be wrong if t2's internal auto_increment counter is different
|
|
|
|
from what it was on master (which is likely). In 5.1, in mixed binlogging
|
|
|
|
mode, row-based binlogging is used for such cases where two
|
|
|
|
auto_increment columns are inserted.
|
|
|
|
*/
|
|
|
|
inline void record_first_successful_insert_id_in_cur_stmt(ulonglong id)
|
|
|
|
{
|
|
|
|
if (first_successful_insert_id_in_cur_stmt == 0)
|
|
|
|
first_successful_insert_id_in_cur_stmt= id;
|
|
|
|
}
|
|
|
|
inline ulonglong read_first_successful_insert_id_in_prev_stmt(void)
|
|
|
|
{
|
|
|
|
if (!stmt_depends_on_first_successful_insert_id_in_prev_stmt)
|
|
|
|
{
|
|
|
|
/* It's the first time we read it */
|
|
|
|
first_successful_insert_id_in_prev_stmt_for_binlog=
|
|
|
|
first_successful_insert_id_in_prev_stmt;
|
|
|
|
stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1;
|
|
|
|
}
|
|
|
|
return first_successful_insert_id_in_prev_stmt;
|
|
|
|
}
|
|
|
|
/*
|
2007-03-30 15:29:30 +02:00
|
|
|
Used by Intvar_log_event::do_apply_event() and by "SET INSERT_ID=#"
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
(mysqlbinlog). We'll soon add a variant which can take many intervals in
|
|
|
|
argument.
|
|
|
|
*/
|
|
|
|
inline void force_one_auto_inc_interval(ulonglong next_id)
|
|
|
|
{
|
2006-07-12 08:52:47 +02:00
|
|
|
auto_inc_intervals_forced.empty(); // in case of multiple SET INSERT_ID
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
auto_inc_intervals_forced.append(next_id, ULONGLONG_MAX, 0);
|
|
|
|
}
|
|
|
|
|
2003-06-30 12:28:36 +02:00
|
|
|
ulonglong limit_found_rows;
|
2005-08-15 17:15:12 +02:00
|
|
|
ulonglong options; /* Bitmap of states */
|
|
|
|
longlong row_count_func; /* For the ROW_COUNT() function */
|
2003-08-20 20:57:37 +02:00
|
|
|
ha_rows cuted_fields,
|
2002-07-23 17:31:22 +02:00
|
|
|
sent_row_count, examined_row_count;
|
2005-08-12 16:57:19 +02:00
|
|
|
/*
|
|
|
|
The set of those tables whose fields are referenced in all subqueries
|
|
|
|
of the query.
|
|
|
|
TODO: possibly this it is incorrect to have used tables in THD because
|
|
|
|
with more than one subquery, it is not clear what does the field mean.
|
|
|
|
*/
|
2001-12-05 12:03:00 +01:00
|
|
|
table_map used_tables;
|
2002-05-15 12:50:38 +02:00
|
|
|
USER_CONN *user_connect;
|
2003-11-14 13:50:19 +01:00
|
|
|
CHARSET_INFO *db_charset;
|
2003-12-04 20:08:26 +01:00
|
|
|
/*
|
|
|
|
FIXME: this, and some other variables like 'count_cuted_fields'
|
|
|
|
maybe should be statement/cursor local, that is, moved to Statement
|
2003-12-20 00:16:10 +01:00
|
|
|
class. With current implementation warnings produced in each prepared
|
|
|
|
statement/cursor settle here.
|
2003-12-04 20:08:26 +01:00
|
|
|
*/
|
2003-11-14 13:50:19 +01:00
|
|
|
List <MYSQL_ERROR> warn_list;
|
2002-10-02 12:33:08 +02:00
|
|
|
uint warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_END];
|
2003-10-31 13:15:13 +01:00
|
|
|
uint total_warn_count;
|
2004-03-02 20:39:50 +01:00
|
|
|
/*
|
|
|
|
Id of current query. Statement can be reused to execute several queries
|
|
|
|
query_id is global in context of the whole MySQL server.
|
|
|
|
ID is automatically generated from mutex-protected counter.
|
|
|
|
It's used in handler code for various purposes: to check which columns
|
|
|
|
from table are necessary for this select, to check if it's necessary to
|
|
|
|
update auto-updatable fields (like auto_increment and timestamp).
|
|
|
|
*/
|
2005-03-19 01:12:25 +01:00
|
|
|
query_id_t query_id, warn_id;
|
2007-02-23 12:13:55 +01:00
|
|
|
ulong col_access;
|
2003-11-27 18:51:53 +01:00
|
|
|
|
2006-02-03 18:05:30 +01:00
|
|
|
#ifdef ERROR_INJECT_SUPPORT
|
|
|
|
ulong error_inject_value;
|
|
|
|
#endif
|
2003-11-27 18:51:53 +01:00
|
|
|
/* Statement id is thread-wide. This counter is used to generate ids */
|
|
|
|
ulong statement_id_counter;
|
2002-11-07 03:02:37 +01:00
|
|
|
ulong rand_saved_seed1, rand_saved_seed2;
|
2003-04-30 09:02:28 +02:00
|
|
|
ulong row_count; // Row counter, mainly for errors and warnings
|
2007-02-23 12:13:55 +01:00
|
|
|
pthread_t real_id; /* For debugging */
|
|
|
|
my_thread_id thread_id;
|
2005-07-13 11:48:13 +02:00
|
|
|
uint tmp_table, global_read_lock;
|
2006-05-22 20:46:13 +02:00
|
|
|
uint server_status,open_options;
|
|
|
|
enum enum_thread_type system_thread;
|
2006-06-27 12:56:24 +02:00
|
|
|
uint db_length;
|
2002-09-26 22:08:22 +02:00
|
|
|
uint select_number; //number of select (used for EXPLAIN)
|
2002-07-23 17:31:22 +02:00
|
|
|
/* variables.transaction_isolation is reset to this after each commit */
|
|
|
|
enum_tx_isolation session_tx_isolation;
|
2003-10-11 22:26:39 +02:00
|
|
|
enum_check_fields count_cuted_fields;
|
2005-09-07 17:39:47 +02:00
|
|
|
|
|
|
|
DYNAMIC_ARRAY user_var_events; /* For user variables replication */
|
|
|
|
MEM_ROOT *user_var_events_alloc; /* Allocate above array elements here */
|
2003-07-01 21:40:59 +02:00
|
|
|
|
2007-05-28 21:20:22 +02:00
|
|
|
enum killed_state
|
|
|
|
{
|
|
|
|
NOT_KILLED=0,
|
|
|
|
KILL_BAD_DATA=1,
|
|
|
|
KILL_CONNECTION=ER_SERVER_SHUTDOWN,
|
|
|
|
KILL_QUERY=ER_QUERY_INTERRUPTED,
|
|
|
|
KILLED_NO_VALUE /* means neither of the states */
|
|
|
|
};
|
2004-09-15 21:10:31 +02:00
|
|
|
killed_state volatile killed;
|
|
|
|
|
2003-07-01 21:40:59 +02:00
|
|
|
/* scramble - random string sent to client on handshake */
|
2003-07-18 16:57:21 +02:00
|
|
|
char scramble[SCRAMBLE_LENGTH+1];
|
2003-07-01 21:40:59 +02:00
|
|
|
|
2004-06-03 23:17:18 +02:00
|
|
|
bool slave_thread, one_shot_set;
|
WL#2977 and WL#2712 global and session-level variable to set the binlog format (row/statement),
and new binlog format called "mixed" (which is statement-based except if only row-based is correct,
in this cset it means if UDF or UUID is used; more cases could be added in later 5.1 release):
SET GLOBAL|SESSION BINLOG_FORMAT=row|statement|mixed|default;
the global default is statement unless cluster is enabled (then it's row) as in 5.1-alpha.
It's not possible to use SET on this variable if a session is currently in row-based mode and has open temporary tables (because CREATE
TEMPORARY TABLE was not binlogged so temp table is not known on slave), or if NDB is enabled (because
NDB does not support such change on-the-fly, though it will later), of if in a stored function (see below).
The added tests test the possibility or impossibility to SET, their effects, and the mixed mode,
including in prepared statements and in stored procedures and functions.
Caveats:
a) The mixed mode will not work for stored functions: in mixed mode, a stored function will
always be binlogged as one call and in a statement-based way (e.g. INSERT VALUES(myfunc()) or SELECT myfunc()).
b) for the same reason, changing the thread's binlog format inside a stored function is
refused with an error message.
c) the same problems apply to triggers; implementing b) for triggers will be done later (will ask
Dmitri).
Additionally, as the binlog format is now changeable by each user for his session, I remove the implication
which was done at startup, where row-based automatically set log-bin-trust-routine-creators to 1
(not possible anymore as a user can now switch to stmt-based and do nasty things again), and automatically
set --innodb-locks-unsafe-for-binlog to 1 (was anyway theoretically incorrect as it disabled
phantom protection).
Plus fixes for compiler warnings.
2006-02-25 22:21:03 +01:00
|
|
|
/* tells if current statement should binlog row-based(1) or stmt-based(0) */
|
|
|
|
bool current_stmt_binlog_row_based;
|
2003-11-27 18:51:53 +01:00
|
|
|
bool locked, some_tables_deleted;
|
2003-04-30 09:02:28 +02:00
|
|
|
bool last_cuted_field;
|
2003-11-27 18:51:53 +01:00
|
|
|
bool no_errors, password, is_fatal_error;
|
2004-09-15 21:10:31 +02:00
|
|
|
bool query_start_used, rand_used, time_zone_used;
|
2006-07-10 15:27:03 +02:00
|
|
|
/* for IS NULL => = last_insert_id() fix in remove_eq_conds() */
|
|
|
|
bool substitute_null_with_insert_id;
|
2004-08-20 16:35:23 +02:00
|
|
|
bool in_lock_tables;
|
2001-08-21 19:06:00 +02:00
|
|
|
bool query_error, bootstrap, cleanup_done;
|
2003-02-10 16:59:16 +01:00
|
|
|
bool tmp_table_used;
|
2003-08-18 23:08:08 +02:00
|
|
|
bool charset_is_system_charset, charset_is_collation_connection;
|
2006-01-18 09:55:38 +01:00
|
|
|
bool charset_is_character_set_filesystem;
|
2005-06-16 21:05:38 +02:00
|
|
|
bool enable_slow_log; /* enable slow log for current statement */
|
2007-03-23 16:12:58 +01:00
|
|
|
struct {
|
|
|
|
bool all:1;
|
|
|
|
bool stmt:1;
|
|
|
|
} no_trans_update;
|
|
|
|
bool abort_on_warning;
|
2005-02-11 22:33:52 +01:00
|
|
|
bool got_warning; /* Set on call to push_warning() */
|
2005-02-24 22:33:42 +01:00
|
|
|
bool no_warnings_for_error; /* no warnings on call to my_error() */
|
2005-03-28 14:13:31 +02:00
|
|
|
/* set during loop of derived table processing */
|
|
|
|
bool derived_tables_processing;
|
2005-08-15 17:15:12 +02:00
|
|
|
my_bool tablespace_op; /* This is TRUE in DISCARD/IMPORT TABLESPACE */
|
|
|
|
|
Simplistic, experimental framework for Stored Procedures (SPs).
Implements creation and dropping of PROCEDUREs, IN, OUT, and INOUT parameters,
single-statement procedures, rudimentary multi-statement (begin-end) prodedures
(when the client can handle it), and local variables.
Missing most of the embedded SQL language, all attributes, FUNCTIONs, error handling,
reparses procedures at each call (no caching), etc, etc.
Certainly buggy too, but procedures can actually be created and called....
2002-12-08 19:59:22 +01:00
|
|
|
sp_rcontext *spcont; // SP runtime context
|
2003-07-03 15:58:37 +02:00
|
|
|
sp_cache *sp_proc_cache;
|
|
|
|
sp_cache *sp_func_cache;
|
2002-06-12 23:13:12 +02:00
|
|
|
|
2001-12-05 12:03:00 +01:00
|
|
|
/*
|
|
|
|
If we do a purge of binary logs, log index info of the threads
|
|
|
|
that are currently reading it needs to be adjusted. To do that
|
|
|
|
each thread that is using LOG_INFO needs to adjust the pointer to it
|
|
|
|
*/
|
2000-10-27 06:11:55 +02:00
|
|
|
LOG_INFO* current_linfo;
|
2001-12-05 12:03:00 +01:00
|
|
|
NET* slave_net; // network connection from slave -> m.
|
2002-07-23 17:31:22 +02:00
|
|
|
/* Used by the sys_var class to store temporary values */
|
|
|
|
union
|
|
|
|
{
|
WL#3146 "less locking in auto_increment":
this is a cleanup patch for our current auto_increment handling:
new names for auto_increment variables in THD, new methods to manipulate them
(see sql_class.h), some move into handler::, causing less backup/restore
work when executing substatements.
This makes the logic hopefully clearer, less work is is needed in
mysql_insert().
By cleaning up, using different variables for different purposes (instead
of one for 3 things...), we fix those bugs, which someone may want to fix
in 5.0 too:
BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate
statement-based"
BUG#20341 "stored function inserting into one auto_increment puts bad
data in slave"
BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE"
(now if a row is updated, LAST_INSERT_ID() will return its id)
and re-fixes:
BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT"
(already fixed differently by Ramil in 4.1)
Test of documented behaviour of mysql_insert_id() (there was no test).
The behaviour changes introduced are:
- LAST_INSERT_ID() now returns "the first autogenerated auto_increment value
successfully inserted", instead of "the first autogenerated auto_increment
value if any row was successfully inserted", see auto_increment.test.
Same for mysql_insert_id(), see mysql_client_test.c.
- LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY
UPDATE, see auto_increment.test. Same for mysql_insert_id(), see
mysql_client_test.c.
- LAST_INSERT_ID() does not change if no autogenerated value was successfully
inserted (it used to then be 0), see auto_increment.test.
- if in INSERT SELECT no autogenerated value was successfully inserted,
mysql_insert_id() now returns the id of the last inserted row (it already
did this for INSERT VALUES), see mysql_client_test.c.
- if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X
(it already did this for INSERT VALUES), see mysql_client_test.c.
- NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE,
the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID
influences not only the first row now.
Additionally, when unlocking a table we check that the thread is not keeping
a next_insert_id (as the table is unlocked that id is potentially out-of-date);
forgetting about this next_insert_id is done in a new
handler::ha_release_auto_increment().
Finally we prepare for engines capable of reserving finite-length intervals
of auto_increment values: we store such intervals in THD. The next step
(to be done by the replication team in 5.1) is to read those intervals from
THD and actually store them in the statement-based binary log. NDB
will be a good engine to test that.
2006-07-09 17:52:19 +02:00
|
|
|
my_bool my_bool_value;
|
|
|
|
long long_value;
|
|
|
|
ulong ulong_value;
|
|
|
|
ulonglong ulonglong_value;
|
2002-07-23 17:31:22 +02:00
|
|
|
} sys_var_tmp;
|
2005-08-25 15:34:34 +02:00
|
|
|
|
|
|
|
struct {
|
|
|
|
/*
|
|
|
|
If true, mysql_bin_log::write(Log_event) call will not write events to
|
|
|
|
binlog, and maintain 2 below variables instead (use
|
|
|
|
mysql_bin_log.start_union_events to turn this on)
|
|
|
|
*/
|
|
|
|
bool do_union;
|
|
|
|
/*
|
|
|
|
If TRUE, at least one mysql_bin_log::write(Log_event) call has been
|
|
|
|
made after last mysql_bin_log.start_union_events() call.
|
|
|
|
*/
|
|
|
|
bool unioned_events;
|
|
|
|
/*
|
|
|
|
If TRUE, at least one mysql_bin_log::write(Log_event e), where
|
|
|
|
e.cache_stmt == TRUE call has been made after last
|
|
|
|
mysql_bin_log.start_union_events() call.
|
|
|
|
*/
|
|
|
|
bool unioned_events_trans;
|
2005-09-07 17:39:47 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
'queries' (actually SP statements) that run under inside this binlog
|
|
|
|
union have thd->query_id >= first_query_id.
|
|
|
|
*/
|
|
|
|
query_id_t first_query_id;
|
2005-08-25 15:34:34 +02:00
|
|
|
} binlog_evt_union;
|
2007-04-26 05:38:12 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
Character input stream consumed by the lexical analyser,
|
|
|
|
used during parsing.
|
|
|
|
Note that since the parser is not re-entrant, we keep only one input
|
|
|
|
stream here. This member is valid only when executing code during parsing,
|
|
|
|
and may point to invalid memory after that.
|
|
|
|
*/
|
|
|
|
Lex_input_stream *m_lip;
|
|
|
|
|
2006-03-20 15:46:13 +01:00
|
|
|
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
2006-03-18 15:48:21 +01:00
|
|
|
partition_info *work_part_info;
|
2006-03-20 15:46:13 +01:00
|
|
|
#endif
|
2006-04-13 09:50:33 +02:00
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
THD();
|
|
|
|
~THD();
|
2002-12-16 14:33:29 +01:00
|
|
|
|
2002-11-16 19:19:10 +01:00
|
|
|
void init(void);
|
2003-12-21 20:26:45 +01:00
|
|
|
/*
|
|
|
|
Initialize memory roots necessary for query processing and (!)
|
|
|
|
pre-allocate memory for it. We can't do that in THD constructor because
|
|
|
|
there are use cases (acl_init, delayed inserts, watcher threads,
|
|
|
|
killing mysqld) where it's vital to not allocate excessive and not used
|
|
|
|
memory. Note, that we still don't return error from init_for_queries():
|
|
|
|
if preallocation fails, we should notice that at the first call to
|
|
|
|
alloc_root.
|
|
|
|
*/
|
|
|
|
void init_for_queries();
|
2002-11-16 19:19:10 +01:00
|
|
|
void change_user(void);
|
2001-08-21 19:06:00 +02:00
|
|
|
void cleanup(void);
|
2004-09-15 21:10:31 +02:00
|
|
|
void cleanup_after_query();
|
2000-07-31 21:29:14 +02:00
|
|
|
bool store_globals();
|
2001-03-14 07:07:12 +01:00
|
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
|
|
|
|
inline void set_active_vio(Vio* vio)
|
2001-03-13 04:17:32 +01:00
|
|
|
{
|
2002-08-22 15:50:58 +02:00
|
|
|
pthread_mutex_lock(&LOCK_delete);
|
2001-03-14 07:07:12 +01:00
|
|
|
active_vio = vio;
|
2002-08-22 15:50:58 +02:00
|
|
|
pthread_mutex_unlock(&LOCK_delete);
|
2001-03-13 04:17:32 +01:00
|
|
|
}
|
2001-03-14 07:07:12 +01:00
|
|
|
inline void clear_active_vio()
|
2001-03-13 04:17:32 +01:00
|
|
|
{
|
2002-08-22 15:50:58 +02:00
|
|
|
pthread_mutex_lock(&LOCK_delete);
|
2001-03-14 07:07:12 +01:00
|
|
|
active_vio = 0;
|
2002-08-22 15:50:58 +02:00
|
|
|
pthread_mutex_unlock(&LOCK_delete);
|
2001-03-13 04:17:32 +01:00
|
|
|
}
|
2003-01-28 07:38:28 +01:00
|
|
|
void close_active_vio();
|
2005-04-05 13:17:49 +02:00
|
|
|
#endif
|
2003-03-31 10:39:46 +02:00
|
|
|
void awake(THD::killed_state state_to_set);
|
2007-05-29 21:17:09 +02:00
|
|
|
|
|
|
|
#ifndef MYSQL_CLIENT
|
|
|
|
enum enum_binlog_query_type {
|
|
|
|
/*
|
|
|
|
The query can be logged row-based or statement-based
|
|
|
|
*/
|
|
|
|
ROW_QUERY_TYPE,
|
|
|
|
|
|
|
|
/*
|
|
|
|
The query has to be logged statement-based
|
|
|
|
*/
|
|
|
|
STMT_QUERY_TYPE,
|
|
|
|
|
|
|
|
/*
|
|
|
|
The query represents a change to a table in the "mysql"
|
|
|
|
database and is currently mapped to ROW_QUERY_TYPE.
|
|
|
|
*/
|
|
|
|
MYSQL_QUERY_TYPE,
|
|
|
|
QUERY_TYPE_COUNT
|
|
|
|
};
|
|
|
|
|
|
|
|
int binlog_query(enum_binlog_query_type qtype,
|
|
|
|
char const *query, ulong query_len,
|
|
|
|
bool is_trans, bool suppress_use,
|
|
|
|
THD::killed_state killed_err_arg= THD::KILLED_NO_VALUE);
|
|
|
|
#endif
|
|
|
|
|
2004-07-31 09:49:32 +02:00
|
|
|
/*
|
|
|
|
For enter_cond() / exit_cond() to work the mutex must be got before
|
2005-04-05 13:17:49 +02:00
|
|
|
enter_cond(); this mutex is then released by exit_cond().
|
|
|
|
Usage must be: lock mutex; enter_cond(); your code; exit_cond().
|
2004-07-31 09:49:32 +02:00
|
|
|
*/
|
2001-01-17 13:47:33 +01:00
|
|
|
inline const char* enter_cond(pthread_cond_t *cond, pthread_mutex_t* mutex,
|
|
|
|
const char* msg)
|
|
|
|
{
|
|
|
|
const char* old_msg = proc_info;
|
2005-04-05 13:17:49 +02:00
|
|
|
safe_mutex_assert_owner(mutex);
|
2001-01-17 13:47:33 +01:00
|
|
|
mysys_var->current_mutex = mutex;
|
|
|
|
mysys_var->current_cond = cond;
|
|
|
|
proc_info = msg;
|
|
|
|
return old_msg;
|
|
|
|
}
|
|
|
|
inline void exit_cond(const char* old_msg)
|
|
|
|
{
|
2004-07-31 22:33:20 +02:00
|
|
|
/*
|
|
|
|
Putting the mutex unlock in exit_cond() ensures that
|
|
|
|
mysys_var->current_mutex is always unlocked _before_ mysys_var->mutex is
|
|
|
|
locked (if that would not be the case, you'll get a deadlock if someone
|
|
|
|
does a THD::awake() on you).
|
|
|
|
*/
|
|
|
|
pthread_mutex_unlock(mysys_var->current_mutex);
|
2001-01-17 13:47:33 +01:00
|
|
|
pthread_mutex_lock(&mysys_var->mutex);
|
|
|
|
mysys_var->current_mutex = 0;
|
|
|
|
mysys_var->current_cond = 0;
|
|
|
|
proc_info = old_msg;
|
|
|
|
pthread_mutex_unlock(&mysys_var->mutex);
|
|
|
|
}
|
2000-07-31 21:29:14 +02:00
|
|
|
inline time_t query_start() { query_start_used=1; return start_time; }
|
2000-11-16 19:47:28 +01:00
|
|
|
inline void set_time() { if (user_time) start_time=time_after_lock=user_time; else time_after_lock=time(&start_time); }
|
2000-10-14 02:16:35 +02:00
|
|
|
inline void end_time() { time(&start_time); }
|
2000-11-16 19:47:28 +01:00
|
|
|
inline void set_time(time_t t) { time_after_lock=start_time=user_time=t; }
|
2000-09-16 03:27:21 +02:00
|
|
|
inline void lock_time() { time(&time_after_lock); }
|
2001-04-11 23:54:35 +02:00
|
|
|
inline ulonglong found_rows(void)
|
|
|
|
{
|
|
|
|
return limit_found_rows;
|
2005-01-16 13:16:23 +01:00
|
|
|
}
|
2000-11-24 00:51:18 +01:00
|
|
|
inline bool active_transaction()
|
|
|
|
{
|
2005-01-16 13:16:23 +01:00
|
|
|
#ifdef USING_TRANSACTIONS
|
|
|
|
return server_status & SERVER_STATUS_IN_TRANS;
|
2001-01-03 05:46:33 +01:00
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
2000-11-24 00:51:18 +01:00
|
|
|
}
|
2004-11-05 16:29:47 +01:00
|
|
|
inline bool fill_derived_tables()
|
|
|
|
{
|
2005-09-02 15:21:19 +02:00
|
|
|
return !stmt_arena->is_stmt_prepare() && !lex->only_view_structure();
|
2004-11-05 16:29:47 +01:00
|
|
|
}
|
2006-06-20 12:20:32 +02:00
|
|
|
inline bool fill_information_schema_tables()
|
|
|
|
{
|
|
|
|
return !stmt_arena->is_stmt_prepare();
|
|
|
|
}
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
inline void* trans_alloc(unsigned int size)
|
2004-11-05 16:29:47 +01:00
|
|
|
{
|
2002-03-15 22:57:31 +01:00
|
|
|
return alloc_root(&transaction.mem_root,size);
|
|
|
|
}
|
2004-02-12 02:10:26 +01:00
|
|
|
|
|
|
|
bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
|
|
|
|
const char *from, uint from_length,
|
|
|
|
CHARSET_INFO *from_cs);
|
2004-05-25 00:03:49 +02:00
|
|
|
|
|
|
|
bool convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs);
|
|
|
|
|
2002-03-15 22:57:31 +01:00
|
|
|
void add_changed_table(TABLE *table);
|
2002-09-19 09:36:19 +02:00
|
|
|
void add_changed_table(const char *key, long key_length);
|
|
|
|
CHANGED_TABLE_LIST * changed_table_dup(const char *key, long key_length);
|
2002-09-26 22:08:22 +02:00
|
|
|
int send_explain_fields(select_result *result);
|
2002-12-16 14:33:29 +01:00
|
|
|
#ifndef EMBEDDED_LIBRARY
|
2002-11-03 23:56:25 +01:00
|
|
|
inline void clear_error()
|
|
|
|
{
|
2006-11-01 18:41:09 +01:00
|
|
|
DBUG_ENTER("clear_error");
|
2002-11-03 23:56:25 +01:00
|
|
|
net.last_error[0]= 0;
|
|
|
|
net.last_errno= 0;
|
|
|
|
net.report_error= 0;
|
2004-10-20 03:04:37 +02:00
|
|
|
query_error= 0;
|
2006-11-01 18:41:09 +01:00
|
|
|
DBUG_VOID_RETURN;
|
2002-11-03 23:56:25 +01:00
|
|
|
}
|
2004-06-04 10:02:35 +02:00
|
|
|
inline bool vio_ok() const { return net.vio != 0; }
|
2002-12-16 14:33:29 +01:00
|
|
|
#else
|
|
|
|
void clear_error();
|
2004-05-28 12:59:29 +02:00
|
|
|
inline bool vio_ok() const { return true; }
|
2002-12-16 14:33:29 +01:00
|
|
|
#endif
|
2003-01-30 21:15:44 +01:00
|
|
|
inline void fatal_error()
|
|
|
|
{
|
|
|
|
is_fatal_error= 1;
|
2005-06-15 19:58:35 +02:00
|
|
|
net.report_error= 1;
|
2003-04-02 15:16:19 +02:00
|
|
|
DBUG_PRINT("error",("Fatal error set"));
|
2003-01-30 21:15:44 +01:00
|
|
|
}
|
2003-05-21 14:44:12 +02:00
|
|
|
inline CHARSET_INFO *charset() { return variables.character_set_client; }
|
2003-08-18 23:08:08 +02:00
|
|
|
void update_charset();
|
2004-10-08 00:21:19 +02:00
|
|
|
|
2005-09-02 15:21:19 +02:00
|
|
|
inline Query_arena *activate_stmt_arena_if_needed(Query_arena *backup)
|
2004-11-08 00:13:54 +01:00
|
|
|
{
|
|
|
|
/*
|
2005-09-02 15:21:19 +02:00
|
|
|
Use the persistent arena if we are in a prepared statement or a stored
|
|
|
|
procedure statement and we have not already changed to use this arena.
|
2004-11-08 00:13:54 +01:00
|
|
|
*/
|
2005-09-02 15:21:19 +02:00
|
|
|
if (!stmt_arena->is_conventional() && mem_root != stmt_arena->mem_root)
|
2004-11-08 00:13:54 +01:00
|
|
|
{
|
2005-09-02 15:21:19 +02:00
|
|
|
set_n_backup_active_arena(stmt_arena, backup);
|
|
|
|
return stmt_arena;
|
2004-11-08 00:13:54 +01:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2004-10-10 01:10:00 +02:00
|
|
|
void change_item_tree(Item **place, Item *new_value)
|
2004-10-08 00:21:19 +02:00
|
|
|
{
|
2004-10-10 01:10:00 +02:00
|
|
|
/* TODO: check for OOM condition here */
|
2005-09-02 15:21:19 +02:00
|
|
|
if (!stmt_arena->is_conventional())
|
2004-11-08 00:13:54 +01:00
|
|
|
nocheck_register_item_tree_change(place, *place, mem_root);
|
2004-10-10 01:10:00 +02:00
|
|
|
*place= new_value;
|
2004-10-08 00:21:19 +02:00
|
|
|
}
|
|
|
|
void nocheck_register_item_tree_change(Item **place, Item *old_value,
|
2004-10-10 01:10:00 +02:00
|
|
|
MEM_ROOT *runtime_memroot);
|
2004-10-08 00:21:19 +02:00
|
|
|
void rollback_item_tree_changes();
|
2004-10-14 00:53:59 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
Cleanup statement parse state (parse tree, lex) and execution
|
|
|
|
state after execution of a non-prepared SQL statement.
|
|
|
|
*/
|
|
|
|
void end_statement();
|
2004-09-15 21:10:31 +02:00
|
|
|
inline int killed_errno() const
|
|
|
|
{
|
2007-05-28 21:20:22 +02:00
|
|
|
killed_state killed_val; /* to cache the volatile 'killed' */
|
|
|
|
return (killed_val= killed) != KILL_BAD_DATA ? killed_val : 0;
|
2004-09-15 21:10:31 +02:00
|
|
|
}
|
|
|
|
inline void send_kill_message() const
|
|
|
|
{
|
2004-11-12 13:34:00 +01:00
|
|
|
int err= killed_errno();
|
2005-04-05 00:19:48 +02:00
|
|
|
if (err)
|
|
|
|
my_message(err, ER(err), MYF(0));
|
2004-09-15 21:10:31 +02:00
|
|
|
}
|
2004-09-28 19:08:00 +02:00
|
|
|
/* return TRUE if we will abort query if we make a warning now */
|
|
|
|
inline bool really_abort_on_warning()
|
|
|
|
{
|
|
|
|
return (abort_on_warning &&
|
2007-03-23 16:12:58 +01:00
|
|
|
(!no_trans_update.stmt ||
|
2004-09-28 19:08:00 +02:00
|
|
|
(variables.sql_mode & MODE_STRICT_ALL_TABLES)));
|
|
|
|
}
|
2004-09-13 15:48:01 +02:00
|
|
|
void set_status_var_init();
|
2005-07-15 23:01:44 +02:00
|
|
|
bool is_context_analysis_only()
|
2005-09-02 15:21:19 +02:00
|
|
|
{ return stmt_arena->is_stmt_prepare() || lex->view_prepare_mode; }
|
2005-08-08 15:46:06 +02:00
|
|
|
void reset_n_backup_open_tables_state(Open_tables_state *backup);
|
|
|
|
void restore_backup_open_tables_state(Open_tables_state *backup);
|
2005-08-15 17:15:12 +02:00
|
|
|
void reset_sub_statement_state(Sub_statement_state *backup, uint new_state);
|
|
|
|
void restore_sub_statement_state(Sub_statement_state *backup);
|
2005-09-02 15:21:19 +02:00
|
|
|
void set_n_backup_active_arena(Query_arena *set, Query_arena *backup);
|
|
|
|
void restore_active_arena(Query_arena *set, Query_arena *backup);
|
2007-05-14 14:45:38 +02:00
|
|
|
|
WL#2977 and WL#2712 global and session-level variable to set the binlog format (row/statement),
and new binlog format called "mixed" (which is statement-based except if only row-based is correct,
in this cset it means if UDF or UUID is used; more cases could be added in later 5.1 release):
SET GLOBAL|SESSION BINLOG_FORMAT=row|statement|mixed|default;
the global default is statement unless cluster is enabled (then it's row) as in 5.1-alpha.
It's not possible to use SET on this variable if a session is currently in row-based mode and has open temporary tables (because CREATE
TEMPORARY TABLE was not binlogged so temp table is not known on slave), or if NDB is enabled (because
NDB does not support such change on-the-fly, though it will later), or if in a stored function (see below).
The added tests test the possibility or impossibility to SET, their effects, and the mixed mode,
including in prepared statements and in stored procedures and functions.
Caveats:
a) The mixed mode will not work for stored functions: in mixed mode, a stored function will
always be binlogged as one call and in a statement-based way (e.g. INSERT VALUES(myfunc()) or SELECT myfunc()).
b) for the same reason, changing the thread's binlog format inside a stored function is
refused with an error message.
c) the same problems apply to triggers; implementing b) for triggers will be done later (will ask
Dmitri).
Additionally, as the binlog format is now changeable by each user for his session, I remove the implication
which was done at startup, where row-based automatically set log-bin-trust-routine-creators to 1
(not possible anymore as a user can now switch to stmt-based and do nasty things again), and automatically
set --innodb-locks-unsafe-for-binlog to 1 (was anyway theoretically incorrect as it disabled
phantom protection).
Plus fixes for compiler warnings.
2006-02-25 22:21:03 +01:00
|
|
|
inline void set_current_stmt_binlog_row_based_if_mixed()
|
|
|
|
{
|
* Mixed replication mode * :
1) Fix for BUG#19630 "stored function inserting into two auto_increment breaks
statement-based binlog":
a stored function inserting into two such tables may fail to replicate
(inserting wrong data in the slave's copy of the second table) if the slave's
second table had an internal auto_increment counter different from master's.
Because the auto_increment value autogenerated by master for the 2nd table
does not go into binlog, only the first does, so the slave lacks information.
To fix this, if running in mixed binlogging mode, if the stored function or
trigger plans to update two different tables both having auto_increment
columns, we switch to row-based for the whole function.
We don't have a simple solution for statement-based binlogging mode, there
the bug remains and will be documented as a known problem.
Re-enabling rpl_switch_stm_row_mixed.
2) Fix for BUG#20630 "Mixed binlogging mode does not work with stored
functions, triggers, views", which was a documented limitation (in mixed
mode, we didn't detect that a stored function's execution needed row-based
binlogging (due to some UUID() call for example); same for
triggers, same for views (a view created from a SELECT UUID(), and doing
INSERT INTO sometable SELECT theview; would not replicate row-based).
This is implemented by, after parsing a routine's body, remembering in sp_head
that this routine needs row-based binlogging. Then when this routine is used,
the caller is marked to require row-based binlogging too.
Same for views: when we parse a view and detect that its SELECT needs
row-based binary logging, we mark the calling LEX as such.
3) Fix for BUG#20499 "mixed mode with temporary table breaks binlog":
a temporary table containing e.g. UUID has its changes not binlogged,
so any query updating a permanent table with data from the temporary table
will run wrongly on slave. Solution: in mixed mode we don't switch back
from row-based to statement-based when there exists temporary tables.
4) Attempt to test mysqlbinlog on a binlog generated by mysqlbinlog;
impossible due to BUG#11312 and BUG#20329, but test is in place for when
they are fixed.
2006-07-09 17:00:47 +02:00
|
|
|
/*
|
|
|
|
If in a stored/function trigger, the caller should already have done the
|
|
|
|
change. We test in_sub_stmt to prevent introducing bugs where people
|
|
|
|
wouldn't ensure that, and would switch to row-based mode in the middle
|
|
|
|
of executing a stored function/trigger (which is too late, see also
|
|
|
|
reset_current_stmt_binlog_row_based()); this condition will make their
|
|
|
|
tests fail and so force them to propagate the
|
|
|
|
lex->binlog_row_based_if_mixed upwards to the caller.
|
|
|
|
*/
|
|
|
|
if ((variables.binlog_format == BINLOG_FORMAT_MIXED) &&
|
|
|
|
(in_sub_stmt == 0))
|
2006-05-16 11:16:23 +02:00
|
|
|
current_stmt_binlog_row_based= TRUE;
|
2006-03-08 06:55:21 +01:00
|
|
|
}
|
|
|
|
inline void set_current_stmt_binlog_row_based()
|
|
|
|
{
|
2006-05-16 11:16:23 +02:00
|
|
|
current_stmt_binlog_row_based= TRUE;
|
WL#2977 and WL#2712 global and session-level variable to set the binlog format (row/statement),
and new binlog format called "mixed" (which is statement-based except if only row-based is correct,
in this cset it means if UDF or UUID is used; more cases could be added in later 5.1 release):
SET GLOBAL|SESSION BINLOG_FORMAT=row|statement|mixed|default;
the global default is statement unless cluster is enabled (then it's row) as in 5.1-alpha.
It's not possible to use SET on this variable if a session is currently in row-based mode and has open temporary tables (because CREATE
TEMPORARY TABLE was not binlogged so temp table is not known on slave), or if NDB is enabled (because
NDB does not support such change on-the-fly, though it will later), of if in a stored function (see below).
The added tests test the possibility or impossibility to SET, their effects, and the mixed mode,
including in prepared statements and in stored procedures and functions.
Caveats:
a) The mixed mode will not work for stored functions: in mixed mode, a stored function will
always be binlogged as one call and in a statement-based way (e.g. INSERT VALUES(myfunc()) or SELECT myfunc()).
b) for the same reason, changing the thread's binlog format inside a stored function is
refused with an error message.
c) the same problems apply to triggers; implementing b) for triggers will be done later (will ask
Dmitri).
Additionally, as the binlog format is now changeable by each user for his session, I remove the implication
which was done at startup, where row-based automatically set log-bin-trust-routine-creators to 1
(not possible anymore as a user can now switch to stmt-based and do nasty things again), and automatically
set --innodb-locks-unsafe-for-binlog to 1 (was anyway theoretically incorrect as it disabled
phantom protection).
Plus fixes for compiler warnings.
2006-02-25 22:21:03 +01:00
|
|
|
}
|
2006-06-01 11:53:27 +02:00
|
|
|
inline void clear_current_stmt_binlog_row_based()
|
|
|
|
{
|
2006-06-13 22:09:59 +02:00
|
|
|
current_stmt_binlog_row_based= FALSE;
|
2006-06-23 02:21:12 +02:00
|
|
|
}
|
WL#2977 and WL#2712 global and session-level variable to set the binlog format (row/statement),
and new binlog format called "mixed" (which is statement-based except if only row-based is correct,
in this cset it means if UDF or UUID is used; more cases could be added in later 5.1 release):
SET GLOBAL|SESSION BINLOG_FORMAT=row|statement|mixed|default;
the global default is statement unless cluster is enabled (then it's row) as in 5.1-alpha.
It's not possible to use SET on this variable if a session is currently in row-based mode and has open temporary tables (because CREATE
TEMPORARY TABLE was not binlogged so temp table is not known on slave), or if NDB is enabled (because
NDB does not support such change on-the-fly, though it will later), or if in a stored function (see below).
The added tests test the possibility or impossibility to SET, their effects, and the mixed mode,
including in prepared statements and in stored procedures and functions.
Caveats:
a) The mixed mode will not work for stored functions: in mixed mode, a stored function will
always be binlogged as one call and in a statement-based way (e.g. INSERT VALUES(myfunc()) or SELECT myfunc()).
b) for the same reason, changing the thread's binlog format inside a stored function is
refused with an error message.
c) the same problems apply to triggers; implementing b) for triggers will be done later (will ask
Dmitri).
Additionally, as the binlog format is now changeable by each user for his session, I remove the implication
which was done at startup, where row-based automatically set log-bin-trust-routine-creators to 1
(not possible anymore as a user can now switch to stmt-based and do nasty things again), and automatically
set --innodb-locks-unsafe-for-binlog to 1 (was anyway theoretically incorrect as it disabled
phantom protection).
Plus fixes for compiler warnings.
2006-02-25 22:21:03 +01:00
|
|
|
inline void reset_current_stmt_binlog_row_based()
|
|
|
|
{
|
* Mixed replication mode * :
1) Fix for BUG#19630 "stored function inserting into two auto_increment breaks
statement-based binlog":
a stored function inserting into two such tables may fail to replicate
(inserting wrong data in the slave's copy of the second table) if the slave's
second table had an internal auto_increment counter different from master's.
Because the auto_increment value autogenerated by master for the 2nd table
does not go into binlog, only the first does, so the slave lacks information.
To fix this, if running in mixed binlogging mode, if the stored function or
trigger plans to update two different tables both having auto_increment
columns, we switch to row-based for the whole function.
We don't have a simple solution for statement-based binlogging mode, there
the bug remains and will be documented as a known problem.
Re-enabling rpl_switch_stm_row_mixed.
2) Fix for BUG#20630 "Mixed binlogging mode does not work with stored
functions, triggers, views", which was a documented limitation (in mixed
mode, we didn't detect that a stored function's execution needed row-based
binlogging (due to some UUID() call for example); same for
triggers, same for views (a view created from a SELECT UUID(), and doing
INSERT INTO sometable SELECT theview; would not replicate row-based).
This is implemented by, after parsing a routine's body, remembering in sp_head
that this routine needs row-based binlogging. Then when this routine is used,
the caller is marked to require row-based binlogging too.
Same for views: when we parse a view and detect that its SELECT needs
row-based binary logging, we mark the calling LEX as such.
3) Fix for BUG#20499 "mixed mode with temporary table breaks binlog":
a temporary table containing e.g. UUID has its changes not binlogged,
so any query updating a permanent table with data from the temporary table
will run wrongly on slave. Solution: in mixed mode we don't switch back
from row-based to statement-based when there exists temporary tables.
4) Attempt to test mysqlbinlog on a binlog generated by mysqlbinlog;
impossible due to BUG#11312 and BUG#20329, but test is in place for when
they are fixed.
2006-07-09 17:00:47 +02:00
|
|
|
/*
|
|
|
|
If there are temporary tables, don't reset back to
|
|
|
|
statement-based. Indeed it could be that:
|
|
|
|
CREATE TEMPORARY TABLE t SELECT UUID(); # row-based
|
|
|
|
# and row-based does not store updates to temp tables
|
|
|
|
# in the binlog.
|
|
|
|
INSERT INTO u SELECT * FROM t; # stmt-based
|
|
|
|
and then the INSERT will fail as data inserted into t was not logged.
|
|
|
|
So we continue with row-based until the temp table is dropped.
|
|
|
|
If we are in a stored function or trigger, we mustn't reset in the
|
|
|
|
middle of its execution (as the binary logging way of a stored function
|
|
|
|
or trigger is decided when it starts executing, depending for example on
|
|
|
|
the caller (for a stored function: if caller is SELECT or
|
|
|
|
INSERT/UPDATE/DELETE...).
|
2006-11-01 22:35:58 +01:00
|
|
|
|
|
|
|
Don't reset binlog format for NDB binlog injector thread.
|
* Mixed replication mode * :
1) Fix for BUG#19630 "stored function inserting into two auto_increment breaks
statement-based binlog":
a stored function inserting into two such tables may fail to replicate
(inserting wrong data in the slave's copy of the second table) if the slave's
second table had an internal auto_increment counter different from master's.
Because the auto_increment value autogenerated by master for the 2nd table
does not go into binlog, only the first does, so the slave lacks information.
To fix this, if running in mixed binlogging mode, if the stored function or
trigger plans to update two different tables both having auto_increment
columns, we switch to row-based for the whole function.
We don't have a simple solution for statement-based binlogging mode, there
the bug remains and will be documented as a known problem.
Re-enabling rpl_switch_stm_row_mixed.
2) Fix for BUG#20630 "Mixed binlogging mode does not work with stored
functions, triggers, views", which was a documented limitation (in mixed
mode, we didn't detect that a stored function's execution needed row-based
binlogging (due to some UUID() call for example); same for
triggers, same for views (a view created from a SELECT UUID(), and doing
INSERT INTO sometable SELECT theview; would not replicate row-based).
This is implemented by, after parsing a routine's body, remembering in sp_head
that this routine needs row-based binlogging. Then when this routine is used,
the caller is marked to require row-based binlogging too.
Same for views: when we parse a view and detect that its SELECT needs
row-based binary logging, we mark the calling LEX as such.
3) Fix for BUG#20499 "mixed mode with temporary table breaks binlog":
a temporary table containing e.g. UUID has its changes not binlogged,
so any query updating a permanent table with data from the temporary table
will run wrongly on slave. Solution: in mixed mode we don't switch back
from row-based to statement-based when there exists temporary tables.
4) Attempt to test mysqlbinlog on a binlog generated by mysqlbinlog;
impossible due to BUG#11312 and BUG#20329, but test is in place for when
they are fixed.
2006-07-09 17:00:47 +02:00
|
|
|
*/
|
2006-11-01 22:35:58 +01:00
|
|
|
if ((temporary_tables == NULL) && (in_sub_stmt == 0) &&
|
|
|
|
(system_thread != SYSTEM_THREAD_NDBCLUSTER_BINLOG))
|
* Mixed replication mode * :
1) Fix for BUG#19630 "stored function inserting into two auto_increment breaks
statement-based binlog":
a stored function inserting into two such tables may fail to replicate
(inserting wrong data in the slave's copy of the second table) if the slave's
second table had an internal auto_increment counter different from master's.
Because the auto_increment value autogenerated by master for the 2nd table
does not go into binlog, only the first does, so the slave lacks information.
To fix this, if running in mixed binlogging mode, if the stored function or
trigger plans to update two different tables both having auto_increment
columns, we switch to row-based for the whole function.
We don't have a simple solution for statement-based binlogging mode, there
the bug remains and will be documented as a known problem.
Re-enabling rpl_switch_stm_row_mixed.
2) Fix for BUG#20630 "Mixed binlogging mode does not work with stored
functions, triggers, views", which was a documented limitation (in mixed
mode, we didn't detect that a stored function's execution needed row-based
binlogging (due to some UUID() call for example); same for
triggers, same for views (a view created from a SELECT UUID(), and doing
INSERT INTO sometable SELECT theview; would not replicate row-based).
This is implemented by, after parsing a routine's body, remembering in sp_head
that this routine needs row-based binlogging. Then when this routine is used,
the caller is marked to require row-based binlogging too.
Same for views: when we parse a view and detect that its SELECT needs
row-based binary logging, we mark the calling LEX as such.
3) Fix for BUG#20499 "mixed mode with temporary table breaks binlog":
a temporary table containing e.g. UUID has its changes not binlogged,
so any query updating a permanent table with data from the temporary table
will run wrongly on slave. Solution: in mixed mode we don't switch back
from row-based to statement-based when there exists temporary tables.
4) Attempt to test mysqlbinlog on a binlog generated by mysqlbinlog;
impossible due to BUG#11312 and BUG#20329, but test is in place for when
they are fixed.
2006-07-09 17:00:47 +02:00
|
|
|
{
|
|
|
|
current_stmt_binlog_row_based=
|
|
|
|
test(variables.binlog_format == BINLOG_FORMAT_ROW);
|
|
|
|
}
|
2006-07-12 08:52:47 +02:00
|
|
|
}
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was appropriate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
Initialize the current database from a NULL-terminated string with length
|
2006-06-28 21:47:45 +02:00
|
|
|
If we run out of memory, we free the current database and return TRUE.
|
|
|
|
This way the user will notice the error as there will be no current
|
|
|
|
database selected (in addition to the error message set by malloc).
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
*/
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly as this conflict was often hidden by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
bool set_db(const char *new_db, size_t new_db_len)
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
{
|
2006-06-28 21:47:45 +02:00
|
|
|
/* Do not reallocate memory if current chunk is big enough. */
|
|
|
|
if (db && new_db && db_length >= new_db_len)
|
|
|
|
memcpy(db, new_db, new_db_len+1);
|
|
|
|
else
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
{
|
2006-07-13 15:34:49 +02:00
|
|
|
x_free(db);
|
|
|
|
db= new_db ? my_strndup(new_db, new_db_len, MYF(MY_WME)) : NULL;
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
}
|
2006-06-28 21:47:45 +02:00
|
|
|
db_length= db ? new_db_len : 0;
|
|
|
|
return new_db && !db;
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
}
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
void reset_db(char *new_db, size_t new_db_len)
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
{
|
|
|
|
db= new_db;
|
|
|
|
db_length= new_db_len;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
Copy the current database to the argument. Use the current arena to
|
|
|
|
allocate memory for a deep copy: current database may be freed after
|
|
|
|
a statement is parsed but before it's executed.
|
|
|
|
*/
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocoll functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitely as this conflict was often hided by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
bool copy_db_to(char **p_db, size_t *p_db_length)
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
{
|
|
|
|
if (db == NULL)
|
|
|
|
{
|
|
|
|
my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
|
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
*p_db= strmake(db, db_length);
|
2006-10-16 18:57:33 +02:00
|
|
|
*p_db_length= db_length;
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
return FALSE;
|
|
|
|
}
|
2007-02-23 12:13:55 +01:00
|
|
|
  /*
    Per-connection scheduler state (presumably used by the connection
    scheduler / thread-handling code -- confirm against scheduler sources).
  */
  thd_scheduler scheduler;

public:
  /**
    Add an internal error handler to the thread execution context.
    @param handler the exception handler to add
  */
  void push_internal_handler(Internal_error_handler *handler);

  /**
    Handle an error condition.
    @param sql_errno the error number
    @param level the error level
    @return true if the error is handled
  */
  virtual bool handle_error(uint sql_errno,
                            MYSQL_ERROR::enum_warning_level level);

  /**
    Remove the error handler last pushed.
  */
  void pop_internal_handler();

private:
  /** The current internal error handler for this thread, or NULL. */
  Internal_error_handler *m_internal_handler;
  /**
    The lex to hold the parsed tree of conventional (non-prepared) queries.
    Whereas for prepared and stored procedure statements we use an own lex
    instance for each new query, for conventional statements we reuse
    the same lex. (@see mysql_parse for details).
  */
  LEX main_lex;
  /**
    This memory root is used for two purposes:
    - for conventional queries, to allocate structures stored in main_lex
      during parsing, and allocate runtime data (execution plan, etc.)
      during execution.
    - for prepared queries, only to allocate runtime data. The parsed
      tree itself is reused between executions and thus is stored elsewhere.
  */
  MEM_ROOT main_mem_root;
};
|
|
|
|
|
2005-07-13 11:48:13 +02:00
|
|
|
|
2004-12-03 02:44:33 +01:00
|
|
|
/*
  Temporarily switch off binary logging for THD 'A'.

  tmp_disable_binlog() saves (A)->options and clears OPTION_BIN_LOG;
  reenable_binlog() restores the saved value.

  NOTE: tmp_disable_binlog() opens a block ('{') that reenable_binlog()
  closes ('}'), so the two macros MUST always be used as a matched pair
  within the same scope, with the statements to be executed without
  binlogging placed between them.
*/
#define tmp_disable_binlog(A)       \
  {ulonglong tmp_disable_binlog__save_options= (A)->options; \
  (A)->options&= ~OPTION_BIN_LOG

#define reenable_binlog(A)   (A)->options= tmp_disable_binlog__save_options;}
|
2004-12-03 00:05:11 +01:00
|
|
|
|
2003-12-04 22:42:18 +01:00
|
|
|
|
2002-03-16 09:36:27 +01:00
|
|
|
/*
  Used to hold information about file and file structure in exchange
  via non-DB file (...INTO OUTFILE..., ...LOAD DATA...)

  XXX: We never call destructor for objects of this class.
*/

class sql_exchange :public Sql_alloc
{
public:
  char *file_name;                 /* Name of the exchange file */
  /*
    Field/line formatting strings (FIELDS TERMINATED BY / ENCLOSED BY /
    ESCAPED BY, LINES TERMINATED BY / STARTING BY clauses).
  */
  String *field_term,*enclosed,*line_term,*line_start,*escaped;
  bool opt_enclosed;               /* TRUE if ENCLOSED BY was OPTIONALLY */
  bool dumpfile;                   /* TRUE for SELECT ... INTO DUMPFILE */
  /* Number of initial lines to skip (presumably IGNORE n LINES -- verify) */
  ulong skip_lines;
  CHARSET_INFO *cs;                /* Character set of the file */
  sql_exchange(char *name,bool dumpfile_flag);
};
|
|
|
|
|
|
|
|
#include "log_event.h"
|
|
|
|
|
|
|
|
/*
  This is used to get result from a select:
  abstract interface that receives the rows produced by query execution.
*/

class JOIN;

class select_result :public Sql_alloc {
protected:
  THD *thd;                        /* Thread the result belongs to */
  SELECT_LEX_UNIT *unit;           /* Unit this result is attached to */
public:
  select_result();
  virtual ~select_result() {};
  /*
    Attach this result to a select unit; called before execution.
    Returns 0 on success.
  */
  virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u)
  {
    unit= u;
    return 0;
  }
  /* Second-phase preparation hook; default is a no-op. */
  virtual int prepare2(void) { return 0; }
  /*
    Because of peculiarities of prepared statements protocol
    we need to know number of columns in the result set (if
    there is a result set) apart from sending columns metadata.
  */
  virtual uint field_count(List<Item> &fields) const
  { return fields.elements; }
  /* Send result set metadata (column definitions) to the receiver. */
  virtual bool send_fields(List<Item> &list, uint flags)=0;
  /* Deliver one row of the result set. */
  virtual bool send_data(List<Item> &items)=0;
  /*
    Hook called with the JOIN before execution; default does nothing.
    NOTE: default argument on a virtual function -- kept for
    compatibility with existing callers.
  */
  virtual bool initialize_tables (JOIN *join=0) { return 0; }
  virtual void send_error(uint errcode,const char *err);
  /* Called when all rows have been sent. */
  virtual bool send_eof()=0;
  /**
    Check if this query returns a result set and therefore is allowed in
    cursors and set an error message if it is not the case.

    @retval FALSE success
    @retval TRUE  error, an error message is set
  */
  virtual bool check_simple_select() const;
  virtual void abort() {}
  /*
    Cleanup instance of this class for next execution of a prepared
    statement/stored procedure.
  */
  virtual void cleanup();
  void set_thd(THD *thd_arg) { thd= thd_arg; }
  /* Virtual only in the embedded server, where datasets are buffered. */
#ifdef EMBEDDED_LIBRARY
  virtual void begin_dataset() {}
#else
  void begin_dataset() {}
#endif
};
|
|
|
|
|
|
|
|
|
2004-10-21 16:33:53 +02:00
|
|
|
/*
  Base class for select_result descendands which intercept and
  transform result set rows. As the rows are not sent to the client,
  sending of result set metadata should be suppressed as well.
*/

class select_result_interceptor: public select_result
{
public:
  select_result_interceptor() {} /* Remove gcc warning */
  /* No result set is sent to the client, hence zero columns ... */
  uint field_count(List<Item> &fields) const { return 0; }
  /* ... and metadata sending is a no-op. */
  bool send_fields(List<Item> &fields, uint flag) { return FALSE; }
};
|
|
|
|
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/*
  select_result implementation that sends rows to the client
  over the network protocol.
*/
class select_send :public select_result {
  /* Execution status tracked across send_* calls (set by member fns). */
  int status;
public:
  select_send() :status(0) {}
  bool send_fields(List<Item> &list, uint flags);
  bool send_data(List<Item> &items);
  bool send_eof();
  /* A select_send produces a result set, so it is allowed in cursors. */
  virtual bool check_simple_select() const { return FALSE; }
  void abort();
};
|
|
|
|
|
|
|
|
|
2004-10-21 16:33:53 +02:00
|
|
|
/*
  Common base for result interceptors that write rows to a file
  (shared by select_export and select_dump).
*/
class select_to_file :public select_result_interceptor {
protected:
  sql_exchange *exchange;          /* File name and format description */
  File file;                       /* Target file handle; -1 when not open */
  IO_CACHE cache;                  /* Buffered writer for the file */
  ha_rows row_count;               /* Number of rows written so far */
  char path[FN_REFLEN];            /* Resolved path of the target file */

public:
  select_to_file(sql_exchange *ex) :exchange(ex), file(-1),row_count(0L)
  { path[0]=0; }
  ~select_to_file();
  void send_error(uint errcode,const char *err);
  bool send_eof();
  void cleanup();
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Implements SELECT ... INTO OUTFILE: writes rows as formatted text
  according to the sql_exchange description.
*/
class select_export :public select_to_file {
  uint field_term_length;                      /* Length of field terminator */
  int field_sep_char,escape_char,line_sep_char; /* Single-char separators, or -1 */
  /* TRUE when every row has a fixed size (no field terminators used) */
  bool fixed_row_size;
public:
  select_export(sql_exchange *ex) :select_to_file(ex) {}
  ~select_export();
  int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
  bool send_data(List<Item> &items);
};
|
|
|
|
|
2001-08-02 05:29:50 +02:00
|
|
|
|
2004-02-05 10:22:08 +01:00
|
|
|
/*
  Implements SELECT ... INTO DUMPFILE: writes a single row to a file
  without any formatting.
*/
class select_dump :public select_to_file {
public:
  select_dump(sql_exchange *ex) :select_to_file(ex) {}
  int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
  bool send_data(List<Item> &items);
};
|
2001-08-02 05:29:50 +02:00
|
|
|
|
|
|
|
|
2004-10-21 16:33:53 +02:00
|
|
|
/*
  Result interceptor for INSERT ... SELECT: stores the rows produced
  by the SELECT part into the target table instead of sending them
  to the client.
*/
class select_insert :public select_result_interceptor {
 public:
  TABLE_LIST *table_list;          /* Reference to the insert target */
  TABLE *table;                    /* Opened target table */
  List<Item> *fields;              /* Target column list, if given */
  ulonglong autoinc_value_of_last_inserted_row; // autogenerated or not
  COPY_INFO info;                  /* Duplicate handling and row counters */
  bool insert_into_view;           /* TRUE if the target is a view */

  select_insert(TABLE_LIST *table_list_par,
                TABLE *table_par, List<Item> *fields_par,
                List<Item> *update_fields, List<Item> *update_values,
                enum_duplicates duplic, bool ignore);
  ~select_insert();
  int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
  int prepare2(void);
  bool send_data(List<Item> &items);
  /* Store one row's values into the target table record buffer. */
  virtual void store_values(List<Item> &values);
  /*
    Whether written data can be rolled back on error; overridden by
    descendants (base implementation says no).
  */
  virtual bool can_rollback_data() { return 0; }
  void send_error(uint errcode,const char *err);
  bool send_eof();
  /* not implemented: select_insert is never re-used in prepared statements */
  void cleanup();
};
|
|
|
|
|
2001-08-02 05:29:50 +02:00
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
class select_create: public select_insert {
|
|
|
|
ORDER *group;
|
2004-07-16 00:15:55 +02:00
|
|
|
TABLE_LIST *create_table;
|
2007-05-29 17:13:17 +02:00
|
|
|
TABLE_LIST *select_tables;
|
2000-07-31 21:29:14 +02:00
|
|
|
HA_CREATE_INFO *create_info;
|
5.1 version of a fix and test cases for bugs:
Bug#4968 ""Stored procedure crash if cursor opened on altered table"
Bug#6895 "Prepared Statements: ALTER TABLE DROP COLUMN does nothing"
Bug#19182 "CREATE TABLE bar (m INT) SELECT n FROM foo; doesn't work from
stored procedure."
Bug#19733 "Repeated alter, or repeated create/drop, fails"
Bug#22060 "ALTER TABLE x AUTO_INCREMENT=y in SP crashes server"
Bug#24879 "Prepared Statements: CREATE TABLE (UTF8 KEY) produces a
growing key length" (this bug is not fixed in 5.0)
Re-execution of CREATE DATABASE, CREATE TABLE and ALTER TABLE
statements in stored routines or as prepared statements caused
incorrect results (and crashes in versions prior to 5.0.25).
In 5.1 the problem occurred only for CREATE DATABASE, CREATE TABLE
SELECT and CREATE TABLE with INDEX/DATA DIRECTORY options).
The problem of bugs 4968, 19733, 19282 and 6895 was that functions
mysql_prepare_table, mysql_create_table and mysql_alter_table are not
re-execution friendly: during their operation they modify contents
of LEX (members create_info, alter_info, key_list, create_list),
thus making the LEX unusable for the next execution.
In particular, these functions removed processed columns and keys from
create_list, key_list and drop_list. Search the code in sql_table.cc
for drop_it.remove() and similar patterns to find evidence.
The fix is to supply to these functions a usable copy of each of the
above structures at every re-execution of an SQL statement.
To simplify memory management, LEX::key_list and LEX::create_list
were added to LEX::alter_info, a fresh copy of which is created for
every execution.
The problem of crashing bug 22060 stemmed from the fact that the above
mentioned functions were not only modifying HA_CREATE_INFO structure
in LEX, but also were changing it to point to areas in volatile memory
of the execution memory root.
The patch solves this problem by creating and using an on-stack
copy of HA_CREATE_INFO in mysql_execute_command.
Additionally, this patch splits the part of mysql_alter_table
that analyzes and rewrites information from the parser into
a separate function - mysql_prepare_alter_table, in analogy with
mysql_prepare_table, which is renamed to mysql_prepare_create_table.
2007-05-28 13:30:01 +02:00
|
|
|
Alter_info *alter_info;
|
2000-07-31 21:29:14 +02:00
|
|
|
Field **field;
|
|
|
|
public:
|
2007-01-27 02:46:45 +01:00
|
|
|
select_create (TABLE_LIST *table_arg,
|
2004-07-16 00:15:55 +02:00
|
|
|
HA_CREATE_INFO *create_info_par,
|
|
|
|
List<create_field> &fields_par,
|
|
|
|
List<Key> &keys_par,
|
2007-05-29 17:13:17 +02:00
|
|
|
List<Item> &select_fields,enum_duplicates duplic, bool ignore,
|
|
|
|
TABLE_LIST *select_tables_arg)
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to do
statement-specific cleanups.
(The only case it's not called is if we force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For non-DBUG binaries, the dbug_tmp_use_all_columns() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away by the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precautions
in remembering any hidden primary key to be able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns was marked in read_set and only updated
columns was marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read colums that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all index.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEM_ROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields has been
automaticly converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of whether the timestamp field was set by the statement.
- Remove calls to free_io_cache() as this is now done automatically in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparison with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly).
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
:select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore),
|
5.1 version of a fix and test cases for bugs:
Bug#4968 ""Stored procedure crash if cursor opened on altered table"
Bug#6895 "Prepared Statements: ALTER TABLE DROP COLUMN does nothing"
Bug#19182 "CREATE TABLE bar (m INT) SELECT n FROM foo; doesn't work from
stored procedure."
Bug#19733 "Repeated alter, or repeated create/drop, fails"
Bug#22060 "ALTER TABLE x AUTO_INCREMENT=y in SP crashes server"
Bug#24879 "Prepared Statements: CREATE TABLE (UTF8 KEY) produces a
growing key length" (this bug is not fixed in 5.0)
Re-execution of CREATE DATABASE, CREATE TABLE and ALTER TABLE
statements in stored routines or as prepared statements caused
incorrect results (and crashes in versions prior to 5.0.25).
In 5.1 the problem occurred only for CREATE DATABASE, CREATE TABLE
SELECT and CREATE TABLE with INDEX/DATA DIRECTORY options).
The problem of bugs 4968, 19733, 19282 and 6895 was that functions
mysql_prepare_table, mysql_create_table and mysql_alter_table are not
re-execution friendly: during their operation they modify contents
of LEX (members create_info, alter_info, key_list, create_list),
thus making the LEX unusable for the next execution.
In particular, these functions removed processed columns and keys from
create_list, key_list and drop_list. Search the code in sql_table.cc
for drop_it.remove() and similar patterns to find evidence.
The fix is to supply to these functions a usable copy of each of the
above structures at every re-execution of an SQL statement.
To simplify memory management, LEX::key_list and LEX::create_list
were added to LEX::alter_info, a fresh copy of which is created for
every execution.
The problem of crashing bug 22060 stemmed from the fact that the above
mentioned functions were not only modifying HA_CREATE_INFO structure
in LEX, but also were changing it to point to areas in volatile memory
of the execution memory root.
The patch solves this problem by creating and using an on-stack
copy of HA_CREATE_INFO in mysql_execute_command.
Additionally, this patch splits the part of mysql_alter_table
that analyzes and rewrites information from the parser into
a separate function - mysql_prepare_alter_table, in analogy with
mysql_prepare_table, which is renamed to mysql_prepare_create_table.
2007-05-28 13:30:01 +02:00
|
|
|
create_table(table_arg),
|
2007-05-29 17:13:17 +02:00
|
|
|
create_info(create_info_par), select_tables(select_tables_arg)
|
2000-07-31 21:29:14 +02:00
|
|
|
{}
|
2002-05-08 22:14:40 +02:00
|
|
|
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
|
2006-06-16 01:15:19 +02:00
|
|
|
|
2006-02-16 08:30:53 +01:00
|
|
|
void binlog_show_create_table(TABLE **tables, uint count);
|
2004-12-03 15:02:29 +01:00
|
|
|
void store_values(List<Item> &values);
|
2004-12-03 00:05:11 +01:00
|
|
|
void send_error(uint errcode,const char *err);
|
2000-07-31 21:29:14 +02:00
|
|
|
bool send_eof();
|
|
|
|
void abort();
|
This changeset is largely a handler cleanup changeset (WL#3281), but includes fixes and cleanups that was found necessary while testing the handler changes
Changes that requires code changes in other code of other storage engines.
(Note that all changes are very straightforward and one should find all issues
by compiling a --debug build and fixing all compiler errors and all
asserts in field.cc while running the test suite),
- New optional handler function introduced: reset()
This is called after every DML statement to make it easy for a handler to do
statement-specific cleanups.
(The only case it's not called is if we force the file to be closed)
- handler::extra(HA_EXTRA_RESET) is removed. Code that was there before
should be moved to handler::reset()
- table->read_set contains a bitmap over all columns that are needed
in the query. read_row() and similar functions only needs to read these
columns
- table->write_set contains a bitmap over all columns that will be updated
in the query. write_row() and update_row() only needs to update these
columns.
The above bitmaps should now be up to date in all context
(including ALTER TABLE, filesort()).
The handler is informed of any changes to the bitmap after
fix_fields() by calling the virtual function
handler::column_bitmaps_signal(). If the handler does caching of
these bitmaps (instead of using table->read_set, table->write_set),
it should redo the caching in this code. as the signal() may be sent
several times, it's probably best to set a variable in the signal
and redo the caching on read_row() / write_row() if the variable was
set.
- Removed the read_set and write_set bitmap objects from the handler class
- Removed all column bit handling functions from the handler class.
(Now one instead uses the normal bitmap functions in my_bitmap.c instead
of handler dedicated bitmap functions)
- field->query_id is removed. One should instead check
table->read_set and table->write_set if a field is used in the query.
- handler::extra(HA_EXTRA_RETRIVE_ALL_COLS) and
handler::extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) are removed. One should now
instead use table->read_set to check for which columns to retrieve.
- If a handler needs to call Field->val() or Field->store() on columns
that are not used in the query, one should install a temporary
all-columns-used map while doing so. For this, we provide the following
functions:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
field->val();
dbug_tmp_restore_column_map(table->read_set, old_map);
and similar for the write map:
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
field->val();
dbug_tmp_restore_column_map(table->write_set, old_map);
If this is not done, you will sooner or later hit a DBUG_ASSERT
in the field store() / val() functions.
(For non-DBUG binaries, the dbug_tmp_use_all_columns() and
dbug_tmp_restore_column_map() are inline dummy functions and should
be optimized away by the compiler).
- If one needs to temporary set the column map for all binaries (and not
just to avoid the DBUG_ASSERT() in the Field::store() / Field::val()
methods) one should use the functions tmp_use_all_columns() and
tmp_restore_column_map() instead of the above dbug_ variants.
- All 'status' fields in the handler base class (like records,
data_file_length etc) are now stored in a 'stats' struct. This makes
it easier to know what status variables are provided by the base
handler. This requires some trivial variable names in the extra()
function.
- New virtual function handler::records(). This is called to optimize
COUNT(*) if (handler::table_flags() & HA_HAS_RECORDS()) is true.
(stats.records is not supposed to be an exact value. It only has to
be 'reasonable enough' for the optimizer to be able to choose a good
optimization path).
- Non virtual handler::init() function added for caching of virtual
constants from engine.
- Removed has_transactions() virtual method. Now one should instead return
HA_NO_TRANSACTIONS in table_flags() if the table handler DOES NOT support
transactions.
- The 'xxxx_create_handler()' function now has a MEM_ROOT argument
that is to be used with 'new handler_name()' to allocate the handler
in the right area. The xxxx_create_handler() function is also
responsible for any initialization of the object before returning.
For example, one should change:
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
->
static handler *myisam_create_handler(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
return new (mem_root) ha_myisam(table);
}
- New optional virtual function: use_hidden_primary_key().
This is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
but we don't have a primary key. This allows the handler to take precautions
by remembering any hidden primary key, to be able to update/delete any
found row. The default handler marks all columns to be read.
- handler::table_flags() now returns a ulonglong (to allow for more flags).
- New/changed table_flags()
- HA_HAS_RECORDS Set if ::records() is supported
- HA_NO_TRANSACTIONS Set if engine doesn't support transactions
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE
Set if we should mark all primary key columns for
read when reading rows as part of a DELETE
statement. If there is no primary key,
all columns are marked for read.
- HA_PARTIAL_COLUMN_READ Set if engine will not read all columns in some
cases (based on table->read_set)
- HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
Renamed to HA_PRIMARY_KEY_REQUIRED_FOR_POSITION.
- HA_DUPP_POS Renamed to HA_DUPLICATE_POS
- HA_REQUIRES_KEY_COLUMNS_FOR_DELETE
Set this if we should mark ALL key columns for
read when reading rows as part of a DELETE
statement. In case of an update we will mark
all keys for read for which key part changed
value.
- HA_STATS_RECORDS_IS_EXACT
Set this if stats.records is exact.
(This saves us some extra records() calls
when optimizing COUNT(*))
- Removed table_flags()
- HA_NOT_EXACT_COUNT Now one should instead use HA_HAS_RECORDS if
handler::records() gives an exact count() and
HA_STATS_RECORDS_IS_EXACT if stats.records is exact.
- HA_READ_RND_SAME Removed (no one supported this one)
- Removed not needed functions ha_retrieve_all_cols() and ha_retrieve_all_pk()
- Renamed handler::dupp_pos to handler::dup_pos
- Removed not used variable handler::sortkey
Upper level handler changes:
- ha_reset() now does some overall checks and calls ::reset()
- ha_table_flags() added. This is a cached version of table_flags(). The
cache is updated on engine creation time and updated on open.
MySQL level changes (not obvious from the above):
- DBUG_ASSERT() added to check that column usage matches what is set
in the column usage bit maps. (This found a LOT of bugs in current
column marking code).
- In 5.1 before, all used columns were marked in read_set and only updated
columns were marked in write_set. Now we only mark columns for which we
need a value in read_set.
- Column bitmaps are created in open_binary_frm() and open_table_from_share().
(Before this was in table.cc)
- handler::table_flags() calls are replaced with handler::ha_table_flags()
- For calling field->val() you must have the corresponding bit set in
table->read_set. For calling field->store() you must have the
corresponding bit set in table->write_set. (There are asserts in
all store()/val() functions to catch wrong usage)
- thd->set_query_id is renamed to thd->mark_used_columns and instead
of setting this to an integer value, this has now the values:
MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE
Changed also all variables named 'set_query_id' to mark_used_columns.
- In filesort() we now inform the handler of exactly which columns are needed
doing the sort and choosing the rows.
- The TABLE_SHARE object has a 'all_set' column bitmap one can use
when one needs a column bitmap with all columns set.
(This is used for table->use_all_columns() and other places)
- The TABLE object has 3 column bitmaps:
- def_read_set Default bitmap for columns to be read
- def_write_set Default bitmap for columns to be written
- tmp_set Can be used as a temporary bitmap when needed.
The table object has also two pointer to bitmaps read_set and write_set
that the handler should use to find out which columns are used in which way.
- count() optimization now calls handler::records() instead of using
handler->stats.records (if (table_flags() & HA_HAS_RECORDS) is true).
- Added extra argument to Item::walk() to indicate if we should also
traverse sub queries.
- Added TABLE parameter to cp_buffer_from_ref()
- Don't close tables created with CREATE ... SELECT but keep them in
the table cache. (Faster usage of newly created tables).
New interfaces:
- table->clear_column_bitmaps() to initialize the bitmaps for tables
at start of new statements.
- table->column_bitmaps_set() to set up new column bitmaps and signal
the handler about this.
- table->column_bitmaps_set_no_signal() for some few cases where we need
to setup new column bitmaps but don't signal the handler (as the handler
has already been signaled about these before). Used for the moment
only in opt_range.cc when doing ROR scans.
- table->use_all_columns() to install a bitmap where all columns are marked
as use in the read and the write set.
- table->default_column_bitmaps() to install the normal read and write
column bitmaps, but not signaling the handler about this.
This is mainly used when creating TABLE instances.
- table->mark_columns_needed_for_delete(),
table->mark_columns_needed_for_delete() and
table->mark_columns_needed_for_insert() to allow us to put additional
columns in column usage maps if handler so requires.
(The handler indicates what it needs in handler->table_flags())
- table->prepare_for_position() to allow us to tell handler that it
needs to read primary key parts to be able to store them in
future table->position() calls.
(This replaces the table->file->ha_retrieve_all_pk function)
- table->mark_auto_increment_column() to tell handler are going to update
columns part of any auto_increment key.
- table->mark_columns_used_by_index() to mark all columns that is part of
an index. It will also send extra(HA_EXTRA_KEYREAD) to handler to allow
it to quickly know that it only needs to read columns that are part
of the key. (The handler can also use the column map for detecting this,
but simpler/faster handler can just monitor the extra() call).
- table->mark_columns_used_by_index_no_reset() to in addition to other columns,
also mark all columns that is used by the given key.
- table->restore_column_maps_after_mark_index() to restore to default
column maps after a call to table->mark_columns_used_by_index().
- New item function register_field_in_read_map(), for marking used columns
in table->read_map. Used by filesort() to mark all used columns
- Maintain in TABLE->merge_keys set of all keys that are used in query.
(Simplifies some optimization loops)
- Maintain Field->part_of_key_not_clustered which is like Field->part_of_key
but the field in the clustered key is not assumed to be part of all indexes.
(used in opt_range.cc for faster loops)
- dbug_tmp_use_all_columns(), dbug_tmp_restore_column_map()
tmp_use_all_columns() and tmp_restore_column_map() functions to temporally
mark all columns as usable. The 'dbug_' version is primarily intended
inside a handler when it wants to just call Field:store() & Field::val()
functions, but don't need the column maps set for any other usage.
(ie:: bitmap_is_set() is never called)
- We can't use compare_records() to skip updates for handlers that returns
a partial column set and the read_set doesn't cover all columns in the
write set. The reason for this is that if we have a column marked only for
write we can't in the MySQL level know if the value changed or not.
The reason this worked before was that MySQL marked all to be written
columns as also to be read. The new 'optimal' bitmaps exposed this 'hidden
bug'.
- open_table_from_share() does not anymore setup temporary MEM_ROOT
object as a thread specific variable for the handler. Instead we
send the to-be-used MEMROOT to get_new_handler().
(Simpler, faster code)
Bugs fixed:
- Column marking was not done correctly in a lot of cases.
(ALTER TABLE, when using triggers, auto_increment fields etc)
(Could potentially result in wrong values inserted in table handlers
relying on that the old column maps or field->set_query_id was correct)
Especially when it comes to triggers, there may be cases where the
old code would cause lost/wrong values for NDB and/or InnoDB tables.
- Split thd->options flag OPTION_STATUS_NO_TRANS_UPDATE to two flags:
OPTION_STATUS_NO_TRANS_UPDATE and OPTION_KEEP_LOG.
This allowed me to remove some wrong warnings about:
"Some non-transactional changed tables couldn't be rolled back"
- Fixed handling of INSERT .. SELECT and CREATE ... SELECT that wrongly reset
(thd->options & OPTION_STATUS_NO_TRANS_UPDATE) which caused us to lose
some warnings about
"Some non-transactional changed tables couldn't be rolled back")
- Fixed use of uninitialized memory in ha_ndbcluster.cc::delete_table()
which could cause delete_table to report random failures.
- Fixed core dumps for some tests when running with --debug
- Added missing FN_LIBCHAR in mysql_rm_tmp_tables()
(This has probably caused us to not properly remove temporary files after
crash)
- slow_logs was not properly initialized, which could maybe cause
extra/lost entries in slow log.
- If we get an duplicate row on insert, change column map to read and
write all columns while retrying the operation. This is required by
the definition of REPLACE and also ensures that fields that are only
part of UPDATE are properly handled. This fixed a bug in NDB and
REPLACE where REPLACE wrongly copied some column values from the replaced
row.
- For table handler that doesn't support NULL in keys, we would give an error
when creating a primary key with NULL fields, even after the fields have been
automatically converted to NOT NULL.
- Creating a primary key on a SPATIAL key, would fail if field was not
declared as NOT NULL.
Cleanups:
- Removed not used condition argument to setup_tables
- Removed not needed item function reset_query_id_processor().
- Field->add_index is removed. Now this is instead maintained in
(field->flags & FIELD_IN_ADD_INDEX)
- Field->fieldnr is removed (use field->field_index instead)
- New argument to filesort() to indicate that it should return a set of
row pointers (not used columns). This allowed me to remove some references
to sql_command in filesort and should also enable us to return column
results in some cases where we couldn't before.
- Changed column bitmap handling in opt_range.cc to be aligned with TABLE
bitmap, which allowed me to use bitmap functions instead of looping over
all fields to create some needed bitmaps. (Faster and smaller code)
- Broke up found too long lines
- Moved some variable declaration at start of function for better code
readability.
- Removed some not used arguments from functions.
(setup_fields(), mysql_prepare_insert_check_table())
- setup_fields() now takes an enum instead of an int for marking columns
usage.
- For internal temporary tables, use handler::write_row(),
handler::delete_row() and handler::update_row() instead of
handler::ha_xxxx() for faster execution.
- Changed some constants to enum's and define's.
- Using separate column read and write sets allows for easier checking
of timestamp field was set by statement.
- Remove calls to free_io_cache() as this is now done automatically in ha_reset()
- Don't build table->normalized_path as this is now identical to table->path
(after bar's fixes to convert filenames)
- Fixed some missed DBUG_PRINT(.."%lx") to use "0x%lx" to make it easier to
do comparison with the 'convert-dbug-for-diff' tool.
Things left to do in 5.1:
- We wrongly log failed CREATE TABLE ... SELECT in some cases when using
row based logging (as shown by testcase binlog_row_mix_innodb_myisam.result)
Mats has promised to look into this.
- Test that my fix for CREATE TABLE ... SELECT is indeed correct.
(I added several test cases for this, but in this case it's better that
someone else also tests this thoroughly).
Lars has promised to do this.
2006-06-04 17:52:22 +02:00
|
|
|
virtual bool can_rollback_data() { return 1; }
|
|
|
|
|
2006-03-24 12:24:31 +01:00
|
|
|
// Needed for access from local class MY_HOOKS in prepare(), since thd is protected.
|
2006-06-20 10:40:36 +02:00
|
|
|
const THD *get_thd(void) { return thd; }
|
|
|
|
const HA_CREATE_INFO *get_create_info() { return create_info; };
|
2000-07-31 21:29:14 +02:00
|
|
|
};
|
|
|
|
|
2003-04-26 14:12:14 +02:00
|
|
|
#include <myisam.h>
|
|
|
|
|
2005-02-12 20:58:54 +01:00
|
|
|
/*
|
|
|
|
Param to create temporary tables when doing SELECT:s
|
|
|
|
NOTE
|
|
|
|
This structure is copied using memcpy as a part of JOIN.
|
|
|
|
*/
|
2003-04-26 14:12:14 +02:00
|
|
|
|
|
|
|
class TMP_TABLE_PARAM :public Sql_alloc
|
|
|
|
{
|
2003-11-28 11:18:13 +01:00
|
|
|
private:
|
|
|
|
/* Prevent use of these (not safe because of lists and copy_field) */
|
|
|
|
TMP_TABLE_PARAM(const TMP_TABLE_PARAM &);
|
|
|
|
void operator=(TMP_TABLE_PARAM &);
|
|
|
|
|
|
|
|
public:
|
2003-04-26 14:12:14 +02:00
|
|
|
List<Item> copy_funcs;
|
|
|
|
List<Item> save_copy_funcs;
|
|
|
|
Copy_field *copy_field, *copy_field_end;
|
|
|
|
Copy_field *save_copy_field, *save_copy_field_end;
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly as this conflict was often hidden by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
uchar *group_buff;
|
2003-04-26 14:12:14 +02:00
|
|
|
Item **items_to_copy; /* Fields in tmp table */
|
|
|
|
MI_COLUMNDEF *recinfo,*start_recinfo;
|
|
|
|
KEY *keyinfo;
|
|
|
|
ha_rows end_write_records;
|
|
|
|
uint field_count,sum_func_count,func_count;
|
|
|
|
uint hidden_field_count;
|
|
|
|
uint group_parts,group_length,group_null_parts;
|
|
|
|
uint quick_group;
|
|
|
|
bool using_indirect_summary_function;
|
2004-10-10 09:10:53 +02:00
|
|
|
/* If >0 convert all blob fields to varchar(convert_blob_length) */
|
|
|
|
uint convert_blob_length;
|
2004-11-18 10:16:06 +01:00
|
|
|
CHARSET_INFO *table_charset;
|
2005-02-23 13:15:36 +01:00
|
|
|
bool schema_table;
|
2005-11-30 11:52:12 +01:00
|
|
|
/*
|
|
|
|
True if GROUP BY and its aggregate functions are already computed
|
|
|
|
by a table access method (e.g. by loose index scan). In this case
|
|
|
|
query execution should not perform aggregation and should treat
|
|
|
|
aggregate functions as normal functions.
|
|
|
|
*/
|
|
|
|
bool precomputed_group_by;
|
2006-03-29 21:30:34 +02:00
|
|
|
bool force_copy_fields;
|
2003-04-26 14:12:14 +02:00
|
|
|
|
|
|
|
TMP_TABLE_PARAM()
|
2005-02-12 20:58:54 +01:00
|
|
|
:copy_field(0), group_parts(0),
|
2005-08-02 21:09:49 +02:00
|
|
|
group_length(0), group_null_parts(0), convert_blob_length(0),
|
2006-03-30 15:14:55 +02:00
|
|
|
schema_table(0), precomputed_group_by(0), force_copy_fields(0)
|
2003-04-26 14:12:14 +02:00
|
|
|
{}
|
|
|
|
~TMP_TABLE_PARAM()
|
|
|
|
{
|
|
|
|
cleanup();
|
|
|
|
}
|
2003-11-28 11:18:13 +01:00
|
|
|
void init(void);
|
2003-04-26 14:12:14 +02:00
|
|
|
inline void cleanup(void)
|
|
|
|
{
|
|
|
|
if (copy_field) /* Fix for Intel compiler */
|
|
|
|
{
|
|
|
|
delete [] copy_field;
|
2004-11-15 23:16:04 +01:00
|
|
|
save_copy_field= copy_field= 0;
|
2003-04-26 14:12:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2005-09-22 00:11:21 +02:00
|
|
|
class select_union :public select_result_interceptor
|
|
|
|
{
|
2003-04-26 14:12:14 +02:00
|
|
|
TMP_TABLE_PARAM tmp_table_param;
|
2005-09-22 00:11:21 +02:00
|
|
|
public:
|
|
|
|
TABLE *table;
|
2001-08-02 05:29:50 +02:00
|
|
|
|
2005-09-22 00:11:21 +02:00
|
|
|
select_union() :table(0) {}
|
2002-05-08 22:14:40 +02:00
|
|
|
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
|
2001-08-02 05:29:50 +02:00
|
|
|
bool send_data(List<Item> &items);
|
|
|
|
bool send_eof();
|
|
|
|
bool flush();
|
2005-09-22 00:11:21 +02:00
|
|
|
|
|
|
|
bool create_result_table(THD *thd, List<Item> *column_types,
|
|
|
|
bool is_distinct, ulonglong options,
|
|
|
|
const char *alias);
|
2001-08-02 05:29:50 +02:00
|
|
|
};
|
|
|
|
|
2002-06-19 16:52:44 +02:00
|
|
|
/* Base subselect interface class */
|
2004-10-21 16:33:53 +02:00
|
|
|
class select_subselect :public select_result_interceptor
|
2002-05-12 22:46:42 +02:00
|
|
|
{
|
2002-06-19 16:52:44 +02:00
|
|
|
protected:
|
2002-05-12 22:46:42 +02:00
|
|
|
Item_subselect *item;
|
|
|
|
public:
|
|
|
|
select_subselect(Item_subselect *item);
|
2002-06-19 16:52:44 +02:00
|
|
|
bool send_data(List<Item> &items)=0;
|
2002-05-12 22:46:42 +02:00
|
|
|
bool send_eof() { return 0; };
|
|
|
|
};
|
|
|
|
|
2002-06-19 16:52:44 +02:00
|
|
|
/* Single value subselect interface class */
|
2002-12-19 20:15:09 +01:00
|
|
|
class select_singlerow_subselect :public select_subselect
|
2002-06-19 16:52:44 +02:00
|
|
|
{
|
|
|
|
public:
|
2006-12-14 23:51:37 +01:00
|
|
|
select_singlerow_subselect(Item_subselect *item_arg)
|
|
|
|
:select_subselect(item_arg)
|
|
|
|
{}
|
2002-06-19 16:52:44 +02:00
|
|
|
bool send_data(List<Item> &items);
|
|
|
|
};
|
|
|
|
|
2003-08-12 11:38:03 +02:00
|
|
|
/* used in independent ALL/ANY optimisation */
|
|
|
|
class select_max_min_finder_subselect :public select_subselect
|
|
|
|
{
|
|
|
|
Item_cache *cache;
|
|
|
|
bool (select_max_min_finder_subselect::*op)();
|
|
|
|
bool fmax;
|
|
|
|
public:
|
2006-12-14 23:51:37 +01:00
|
|
|
select_max_min_finder_subselect(Item_subselect *item_arg, bool mx)
|
|
|
|
:select_subselect(item_arg), cache(0), fmax(mx)
|
2003-08-12 11:38:03 +02:00
|
|
|
{}
|
2005-02-11 22:33:52 +01:00
|
|
|
void cleanup();
|
2003-08-12 11:38:03 +02:00
|
|
|
bool send_data(List<Item> &items);
|
|
|
|
bool cmp_real();
|
|
|
|
bool cmp_int();
|
2005-02-11 22:33:52 +01:00
|
|
|
bool cmp_decimal();
|
2003-08-12 11:38:03 +02:00
|
|
|
bool cmp_str();
|
|
|
|
};
|
|
|
|
|
2002-06-19 16:52:44 +02:00
|
|
|
/* EXISTS subselect interface class */
|
|
|
|
class select_exists_subselect :public select_subselect
|
|
|
|
{
|
|
|
|
public:
|
2006-12-14 23:51:37 +01:00
|
|
|
select_exists_subselect(Item_subselect *item_arg)
|
|
|
|
:select_subselect(item_arg){}
|
2002-06-19 16:52:44 +02:00
|
|
|
bool send_data(List<Item> &items);
|
|
|
|
};
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/* Structs used when sorting */
|
|
|
|
|
|
|
|
typedef struct st_sort_field {
|
|
|
|
Field *field; /* Field to sort */
|
|
|
|
Item *item; /* Item if not sorting fields */
|
|
|
|
uint length; /* Length of sort field */
|
2005-10-13 23:04:52 +02:00
|
|
|
uint suffix_length; /* Length suffix (0-4) */
|
2000-07-31 21:29:14 +02:00
|
|
|
Item_result result_type; /* Type of item */
|
2003-02-12 20:55:37 +01:00
|
|
|
bool reverse; /* if descending sort */
|
|
|
|
bool need_strxnfrm; /* If we have to use strxnfrm() */
|
2000-07-31 21:29:14 +02:00
|
|
|
} SORT_FIELD;
|
|
|
|
|
|
|
|
|
|
|
|
typedef struct st_sort_buffer {
|
|
|
|
uint index; /* 0 or 1 */
|
|
|
|
uint sort_orders;
|
|
|
|
uint change_pos; /* If sort-fields changed */
|
|
|
|
char **buff;
|
|
|
|
SORT_FIELD *sortorder;
|
|
|
|
} SORT_BUFFER;
|
|
|
|
|
|
|
|
/* Structure for db & table in sql_yacc */
|
|
|
|
|
2003-02-12 20:55:37 +01:00
|
|
|
class Table_ident :public Sql_alloc
|
|
|
|
{
|
A fix and a test case for
Bug#19022 "Memory bug when switching db during trigger execution"
Bug#17199 "Problem when view calls function from another database."
Bug#18444 "Fully qualified stored function names don't work correctly in
SELECT statements"
Documentation note: this patch introduces a change in behaviour of prepared
statements.
This patch adds a few new invariants with regard to how THD::db should
be used. These invariants should be preserved in future:
- one should never refer to THD::db by pointer and always make a deep copy
(strmake, strdup)
- one should never compare two databases by pointer, but use strncmp or
my_strncasecmp
- TABLE_LIST object table->db should be always initialized in the parser or
by creator of the object.
For prepared statements it means that if the current database is changed
after a statement is prepared, the database that was current at prepare
remains active. This also means that you can not prepare a statement that
implicitly refers to the current database if the latter is not set.
This is not documented, and therefore needs documentation. This is NOT a
change in behavior for almost all SQL statements except:
- ALTER TABLE t1 RENAME t2
- OPTIMIZE TABLE t1
- ANALYZE TABLE t1
- TRUNCATE TABLE t1 --
until this patch t1 or t2 could be evaluated at the first execution of
prepared statement.
CURRENT_DATABASE() still works OK and is evaluated at every execution
of prepared statement.
Note, that in stored routines this is not an issue as the default
database is the database of the stored procedure and "use" statement
is prohibited in stored routines.
This patch makes obsolete the use of check_db_used (it was never used in the
old code too) and all other places that check for table->db and assign it
from THD::db if it's NULL, except the parser.
How this patch was created: THD::{db,db_length} were replaced with a
LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were
manually checked and:
- if the place uses thd->db by pointer, it was fixed to make a deep copy
- if a place compared two db pointers, it was fixed to compare them by value
(via strcmp/my_strcasecmp, whatever was approproate)
Then this intermediate patch was used to write a smaller patch that does the
same thing but without a rename.
TODO in 5.1:
- remove check_db_used
- deploy THD::set_db in mysql_change_db
See also comments to individual files.
2006-06-26 22:47:52 +02:00
|
|
|
public:
|
2000-07-31 21:29:14 +02:00
|
|
|
LEX_STRING db;
|
|
|
|
LEX_STRING table;
|
2002-05-06 23:04:16 +02:00
|
|
|
SELECT_LEX_UNIT *sel;
|
2003-04-02 15:16:19 +02:00
|
|
|
inline Table_ident(THD *thd, LEX_STRING db_arg, LEX_STRING table_arg,
|
|
|
|
bool force)
|
2002-05-06 23:04:16 +02:00
|
|
|
:table(table_arg), sel((SELECT_LEX_UNIT *)0)
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2003-04-02 15:16:19 +02:00
|
|
|
if (!force && (thd->client_capabilities & CLIENT_NO_SCHEMA))
|
2000-07-31 21:29:14 +02:00
|
|
|
db.str=0;
|
|
|
|
else
|
|
|
|
db= db_arg;
|
|
|
|
}
|
2002-05-06 23:04:16 +02:00
|
|
|
inline Table_ident(LEX_STRING table_arg)
|
|
|
|
:table(table_arg), sel((SELECT_LEX_UNIT *)0)
|
|
|
|
{
|
|
|
|
db.str=0;
|
|
|
|
}
|
2006-07-19 20:33:19 +02:00
|
|
|
/*
|
|
|
|
This constructor is used only for the case when we create a derived
|
|
|
|
table. A derived table has no name and doesn't belong to any database.
|
|
|
|
Later, if there was an alias specified for the table, it will be set
|
|
|
|
by add_table_to_list.
|
|
|
|
*/
|
2006-04-13 09:50:33 +02:00
|
|
|
inline Table_ident(SELECT_LEX_UNIT *s) : sel(s)
|
2002-05-06 23:04:16 +02:00
|
|
|
{
|
2003-04-02 15:16:19 +02:00
|
|
|
/* We must have a table name here as this is used with add_table_to_list */
|
2006-07-19 20:33:19 +02:00
|
|
|
db.str= empty_c_string; /* a subject to casedn_str */
|
|
|
|
db.length= 0;
|
|
|
|
table.str= internal_table_name;
|
|
|
|
table.length=1;
|
2002-05-06 23:04:16 +02:00
|
|
|
}
|
2006-07-19 20:33:19 +02:00
|
|
|
bool is_derived_table() const { return test(sel); }
|
2000-07-31 21:29:14 +02:00
|
|
|
inline void change_db(char *db_name)
|
2002-05-06 23:04:16 +02:00
|
|
|
{
|
|
|
|
db.str= db_name; db.length= (uint) strlen(db_name);
|
|
|
|
}
|
2000-07-31 21:29:14 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
// this is needed for user_vars hash
|
|
|
|
class user_var_entry
|
|
|
|
{
|
|
|
|
public:
|
2006-02-25 16:46:30 +01:00
|
|
|
user_var_entry() {} /* Remove gcc warning */
|
2000-07-31 21:29:14 +02:00
|
|
|
LEX_STRING name;
|
|
|
|
char *value;
|
2005-03-19 01:12:25 +01:00
|
|
|
ulong length;
|
|
|
|
query_id_t update_query_id, used_query_id;
|
2000-07-31 21:29:14 +02:00
|
|
|
Item_result type;
|
2006-06-09 19:35:54 +02:00
|
|
|
bool unsigned_flag;
|
2003-10-02 23:40:27 +02:00
|
|
|
|
2005-02-11 22:33:52 +01:00
|
|
|
double val_real(my_bool *null_value);
|
2003-10-02 23:40:27 +02:00
|
|
|
longlong val_int(my_bool *null_value);
|
|
|
|
String *val_str(my_bool *null_value, String *str, uint decimals);
|
2005-02-11 22:33:52 +01:00
|
|
|
my_decimal *val_decimal(my_bool *null_value, my_decimal *result);
|
2003-06-24 12:11:07 +02:00
|
|
|
DTCollation collation;
|
2000-07-31 21:29:14 +02:00
|
|
|
};
|
|
|
|
|
2003-12-19 17:04:03 +01:00
|
|
|
/*
|
|
|
|
Unique -- class for unique (removing of duplicates).
|
|
|
|
Puts all values to the TREE. If the tree becomes too big,
|
|
|
|
it's dumped to the file. User can request sorted values, or
|
|
|
|
just iterate through them. In the last case tree merging is performed in
|
|
|
|
memory simultaneously with iteration, so it should be ~2-3x faster.
|
|
|
|
*/
|
2001-05-23 22:47:08 +02:00
|
|
|
|
|
|
|
class Unique :public Sql_alloc
|
|
|
|
{
|
|
|
|
DYNAMIC_ARRAY file_ptrs;
|
2006-11-27 23:47:21 +01:00
|
|
|
ulong max_elements;
|
|
|
|
ulonglong max_in_memory_size;
|
2001-05-23 22:47:08 +02:00
|
|
|
IO_CACHE file;
|
|
|
|
TREE tree;
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly as this conflict was often hidden by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
uchar *record_pointers;
|
2001-05-23 22:47:08 +02:00
|
|
|
bool flush();
|
2003-06-26 06:56:55 +02:00
|
|
|
uint size;
|
2001-05-23 22:47:08 +02:00
|
|
|
|
|
|
|
public:
|
|
|
|
ulong elements;
|
2003-12-19 17:04:03 +01:00
|
|
|
Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
|
2006-11-27 23:47:21 +01:00
|
|
|
uint size_arg, ulonglong max_in_memory_size_arg);
|
2001-05-23 22:47:08 +02:00
|
|
|
~Unique();
|
2005-03-15 01:46:19 +01:00
|
|
|
ulong elements_in_tree() { return tree.elements_in_tree; } /* Number of elements currently buffered in the in-memory tree */
|
2003-12-19 17:04:03 +01:00
|
|
|
inline bool unique_add(void *ptr)
|
2001-05-23 22:47:08 +02:00
|
|
|
{
|
2005-02-11 22:33:52 +01:00
|
|
|
DBUG_ENTER("unique_add");
|
2006-11-20 21:42:06 +01:00
|
|
|
DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements));
|
2001-05-23 22:47:08 +02:00
|
|
|
if (tree.elements_in_tree > max_elements && flush())
|
2005-02-11 22:33:52 +01:00
|
|
|
DBUG_RETURN(1);
|
|
|
|
DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg));
|
2001-05-23 22:47:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool get(TABLE *table);
|
2003-12-19 22:53:14 +01:00
|
|
|
static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
|
2006-11-27 23:47:21 +01:00
|
|
|
ulonglong max_in_memory_size);
|
2003-12-19 22:53:14 +01:00
|
|
|
inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size,
|
2006-11-27 23:47:21 +01:00
|
|
|
ulonglong max_in_memory_size)
|
2003-12-19 22:53:14 +01:00
|
|
|
{
|
2006-11-27 23:47:21 +01:00
|
|
|
register ulonglong max_elems_in_tree=
|
2003-12-19 22:53:14 +01:00
|
|
|
(1 + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
|
2006-11-27 23:47:21 +01:00
|
|
|
return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
|
2003-12-19 22:53:14 +01:00
|
|
|
}
|
|
|
|
|
2003-12-19 17:04:03 +01:00
|
|
|
void reset();
|
|
|
|
bool walk(tree_walk_action action, void *walk_action_arg);
|
|
|
|
|
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions was done:
- Changed byte to uchar
- Changed gptr to uchar*
- Change my_string to char *
- Change my_size_t to size_t
- Change size_s to size_t
Removed declaration of byte, gptr, my_string, my_size_t and size_s.
Following function parameter changes was done:
- All string functions in mysys/strings was changed to use size_t
instead of uint for string lengths.
- All read()/write() functions changed to use size_t (including vio).
- All protocol functions changed to use size_t instead of uint
- Functions that used a pointer to a string length was changed to use size_t*
- Changed malloc(), free() and related functions from using gptr to use void *
as this requires fewer casts in the code and is more in line with how the
standard functions work.
- Added extra length argument to dirname_part() to return the length of the
created string.
- Changed (at least) following functions to take uchar* as argument:
- db_dump()
- my_net_write()
- net_write_command()
- net_store_data()
- DBUG_DUMP()
- decimal2bin() & bin2decimal()
- Changed my_compress() and my_uncompress() to use size_t. Changed one
argument to my_uncompress() from a pointer to a value as we only return
one value (makes function easier to use).
- Changed type of 'pack_data' argument to packfrm() to avoid casts.
- Changed in readfrm() and writefrom(), ha_discover and handler::discover()
the type for argument 'frmdata' to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of
casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.
Other changes:
- Removed a lot of not needed casts
- Added a few new cast required by other changes
- Added some cast to my_multi_malloc() arguments for safety (as string lengths
needs to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (Needed to be done
explicitly, as this conflict was often hidden by casting the function to
hash_get_key).
- Changed some buffers to memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to
get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar
- Include zlib.h in some files as we needed declaration of crc32()
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables to hold the result of my_read() / my_write() to be
size_t. This was needed to properly detect errors (which are
returned as (size_t) -1).
- Removed some very old VMS code
- Changed packfrm()/unpackfrm() to not be depending on uint size
(portability fix)
- Removed windows specific code to restore cursor position as this
causes slowdown on windows and we should not mix read() and pread()
calls anyway as this is not thread safe. Updated function comment to
reflect this. Changed function that depended on original behavior of
my_pwrite() to itself restore the cursor position (one such case).
- Added some missing checking of return value of malloc().
- Changed definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed type of table_def::m_size from my_size_t to ulong to reflect that
m_size is the number of elements in the array, not a string/memory
length.
- Moved THD::max_row_length() to table.cc (as it's not depending on THD).
Inlined max_row_length_blob() into this function.
- More function comments
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declaration (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
- Replaced some calls to alloc_root + memcpy to use
strmake_root()/strdup_root().
- Changed some calls from memdup() to strmake() (Safety fix)
- Simpler loops in client-simple.c
2007-05-10 11:59:39 +02:00
|
|
|
friend int unique_write_to_file(uchar* key, element_count count, Unique *unique);
|
|
|
|
friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique);
|
2001-05-23 22:47:08 +02:00
|
|
|
};
|
2001-06-07 13:10:58 +02:00
|
|
|
|
2003-08-11 21:44:43 +02:00
|
|
|
|
2004-10-21 16:33:53 +02:00
|
|
|
/*
  select_result descendant used to implement multi-table DELETE.
  Declares state only; the collection/delete logic lives in the
  implementation file (presumably sql_delete.cc — confirm there).
*/
class multi_delete :public select_result_interceptor
{
  TABLE_LIST *delete_tables, *table_being_deleted;
  /* One Unique per deleted-from table, used to buffer row ids */
  Unique **tempfiles;
  ha_rows deleted, found;       /* Statistics reported to the client */
  uint num_of_tables;
  int error;
  bool do_delete;
  /* True if at least one table we delete from is transactional */
  bool transactional_tables;
  /* True if at least one table we delete from is not transactional */
  bool normal_tables;
  /* True if rows may be deleted during the scan itself (single pass) */
  bool delete_while_scanning;

public:
  multi_delete(TABLE_LIST *dt, uint num_of_tables);
  ~multi_delete();
  int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
  bool send_data(List<Item> &items);
  bool initialize_tables (JOIN *join);
  void send_error(uint errcode,const char *err);
  /* Second pass: delete the rows buffered in tempfiles */
  int do_deletes();
  bool send_eof();
};
|
|
|
|
|
2001-06-07 13:10:58 +02:00
|
|
|
|
2004-10-21 16:33:53 +02:00
|
|
|
/*
  select_result descendant used to implement multi-table UPDATE.
  Declares state only; the update logic lives in the implementation
  file (presumably sql_update.cc — confirm there).
*/
class multi_update :public select_result_interceptor
{
  TABLE_LIST *all_tables; /* query/update command tables */
  TABLE_LIST *leaves; /* list of leaves of join table tree */
  TABLE_LIST *update_tables, *table_being_updated;
  /* Temporary tables buffering updates for tables that can't be
     updated in place during the scan */
  TABLE **tmp_tables, *main_table, *table_to_update;
  TMP_TABLE_PARAM *tmp_table_param;
  ha_rows updated, found;       /* Statistics reported to the client */
  List <Item> *fields, *values;
  /* Per-table splits of the SET field/value lists */
  List <Item> **fields_for_table, **values_for_table;
  uint table_count;
  /*
    List of tables referenced in the CHECK OPTION condition of
    the updated view excluding the updated table.
  */
  List <TABLE> unupdated_check_opt_tables;
  Copy_field *copy_field;
  enum enum_duplicates handle_duplicates;
  bool do_update, trans_safe;
  /* True if the update operation has made a change in a transactional table */
  bool transactional_tables;
  bool ignore;                  /* UPDATE IGNORE semantics requested */

public:
  multi_update(TABLE_LIST *ut, TABLE_LIST *leaves_list,
	       List<Item> *fields, List<Item> *values,
	       enum_duplicates handle_duplicates, bool ignore);
  ~multi_update();
  int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
  bool send_data(List<Item> &items);
  bool initialize_tables (JOIN *join);
  void send_error(uint errcode,const char *err);
  /* Second pass: apply the updates buffered in tmp_tables */
  int do_updates (bool from_send_error);
  bool send_eof();
};
|
2001-12-26 15:49:10 +01:00
|
|
|
|
2003-01-18 17:21:13 +01:00
|
|
|
/*
  Descriptor of one target variable of SELECT ... INTO
  (a user variable or a stored-procedure local variable).
*/
class my_var : public Sql_alloc {
public:
  LEX_STRING s;                 /* Variable name */
#ifndef DBUG_OFF
  /*
    Routine to which this variable belongs. Used for checking if correct
    runtime context is used for variable handling.
    (Comment previously referred to Item_splocal; it applies to my_var.)
  */
  sp_head *sp;
#endif
  bool local;                   /* TRUE for an SP local variable — presumed; verify against callers */
  uint offset;                  /* Frame offset of the SP variable — presumed; verify against callers */
  enum_field_types type;
  my_var (LEX_STRING& j, bool i, uint o, enum_field_types t)
    :s(j), local(i), offset(o), type(t)
  {}
  ~my_var() {}
};
|
2001-12-26 15:49:10 +01:00
|
|
|
|
2004-10-21 16:33:53 +02:00
|
|
|
/*
  select_result descendant used for SELECT ... INTO @var / SP variables:
  presumably stores the result row into the variables listed in var_list
  (confirm in the implementation file).
*/
class select_dumpvar :public select_result_interceptor {
  ha_rows row_count;            /* Rows received so far via send_data() — presumed; confirm */
public:
  List<my_var> var_list;        /* Target variables, in select-list order */
  select_dumpvar() { var_list.empty(); row_count= 0;}
  ~select_dumpvar() {}
  int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
  bool send_data(List<Item> &items);
  bool send_eof();
  /* INTO targets make this not a "simple" select */
  virtual bool check_simple_select() const;
  void cleanup();
};
|
2004-09-13 15:48:01 +02:00
|
|
|
|
2006-06-20 12:20:32 +02:00
|
|
|
/* Bits in sql_command_flags */

#define CF_CHANGES_DATA 1       /* Statement may modify data — per name; see uses in sql_parse.cc */
#define CF_HAS_ROW_COUNT 2      /* Statement maintains a row count — per name; confirm at use site */
#define CF_STATUS_COMMAND 4     /* SHOW STATUS-style command — per name; confirm at use site */
#define CF_SHOW_TABLE_COMMAND 8 /* SHOW-tables-style command — per name; confirm at use site */

/* Functions in sql_class.cc */

/* Add each counter in from_var into the corresponding counter of to_var */
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);

/* As add_to_status(), but adds (from_var - dec_var) into to_var */
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
                        STATUS_VAR *dec_var);
#endif /* MYSQL_SERVER */
|