mariadb/sql/sql_class.h

/* Copyright (C) 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
#ifndef SQL_CLASS_INCLUDED
#define SQL_CLASS_INCLUDED
/* Classes in mysql */
#ifdef USE_PRAGMA_INTERFACE
#pragma interface /* gcc class implementation */
#endif
#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
#ifdef MYSQL_SERVER
#include "unireg.h" // REQUIRED: for other includes
#endif
#include "sql_const.h"
#include <mysql/plugin_audit.h>
#include "log.h"
#include "rpl_tblmap.h"
#include "mdl.h"
#include "sql_locale.h" /* my_locale_st */
#include "sql_profile.h" /* PROFILING */
#include "scheduler.h" /* thd_scheduler */
#include "protocol.h" /* Protocol_text, Protocol_binary */
#include "violite.h" /* vio_is_connected */
#include "thr_lock.h" /* thr_lock_type, THR_LOCK_DATA,
THR_LOCK_INFO, THR_LOCK_OWNER */
class Reprepare_observer;
class Relay_log_info;
class Query_log_event;
class Load_log_event;
class Slave_log_event;
class sp_rcontext;
class sp_cache;
class Parser_state;
class Rows_log_event;
class Sroutine_hash_entry;
class User_level_lock;
class user_var_entry;
enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
                            DELAY_KEY_WRITE_ALL };
enum enum_slave_exec_mode { SLAVE_EXEC_MODE_STRICT,
                            SLAVE_EXEC_MODE_IDEMPOTENT,
                            SLAVE_EXEC_MODE_LAST_BIT };
enum enum_slave_type_conversions { SLAVE_TYPE_CONVERSIONS_ALL_LOSSY,
                                   SLAVE_TYPE_CONVERSIONS_ALL_NON_LOSSY };
enum enum_mark_columns
{ MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE};
enum enum_filetype { FILETYPE_CSV, FILETYPE_XML };
/* Bits for different SQL modes (including ANSI mode) */
#define MODE_REAL_AS_FLOAT 1
#define MODE_PIPES_AS_CONCAT 2
#define MODE_ANSI_QUOTES 4
#define MODE_IGNORE_SPACE 8
#define MODE_NOT_USED 16
#define MODE_ONLY_FULL_GROUP_BY 32
#define MODE_NO_UNSIGNED_SUBTRACTION 64
#define MODE_NO_DIR_IN_CREATE 128
#define MODE_POSTGRESQL 256
#define MODE_ORACLE 512
#define MODE_MSSQL 1024
#define MODE_DB2 2048
#define MODE_MAXDB 4096
#define MODE_NO_KEY_OPTIONS 8192
#define MODE_NO_TABLE_OPTIONS 16384
#define MODE_NO_FIELD_OPTIONS 32768
#define MODE_MYSQL323 65536L
#define MODE_MYSQL40 (MODE_MYSQL323*2)
#define MODE_ANSI (MODE_MYSQL40*2)
#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2)
#define MODE_NO_BACKSLASH_ESCAPES (MODE_NO_AUTO_VALUE_ON_ZERO*2)
#define MODE_STRICT_TRANS_TABLES (MODE_NO_BACKSLASH_ESCAPES*2)
#define MODE_STRICT_ALL_TABLES (MODE_STRICT_TRANS_TABLES*2)
#define MODE_NO_ZERO_IN_DATE (MODE_STRICT_ALL_TABLES*2)
#define MODE_NO_ZERO_DATE (MODE_NO_ZERO_IN_DATE*2)
#define MODE_INVALID_DATES (MODE_NO_ZERO_DATE*2)
#define MODE_ERROR_FOR_DIVISION_BY_ZERO (MODE_INVALID_DATES*2)
#define MODE_TRADITIONAL (MODE_ERROR_FOR_DIVISION_BY_ZERO*2)
#define MODE_NO_AUTO_CREATE_USER (MODE_TRADITIONAL*2)
#define MODE_HIGH_NOT_PRECEDENCE (MODE_NO_AUTO_CREATE_USER*2)
#define MODE_NO_ENGINE_SUBSTITUTION (MODE_HIGH_NOT_PRECEDENCE*2)
#define MODE_PAD_CHAR_TO_FULL_LENGTH (ULL(1) << 31)
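
/*
  Illustrative example (not part of the original header): each MODE_*
  constant is a single bit in the sql_mode bitmask, so modes are combined
  with bitwise OR and tested with bitwise AND. A hypothetical check:

    static inline bool sql_mode_is_strict(ulonglong sql_mode)
    {
      return (sql_mode &
              (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)) != 0;
    }
*/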
extern char internal_table_name[2];
extern char empty_c_string[1];
extern MYSQL_PLUGIN_IMPORT const char **errmesg;
extern bool volatile shutdown_in_progress;
#define TC_LOG_PAGE_SIZE 8192
#define TC_LOG_MIN_SIZE (3*TC_LOG_PAGE_SIZE)
#define TC_HEURISTIC_RECOVER_COMMIT 1
#define TC_HEURISTIC_RECOVER_ROLLBACK 2
extern uint tc_heuristic_recover;
typedef struct st_user_var_events
{
  user_var_entry *user_var_event;
  char *value;
  ulong length;
  Item_result type;
  uint charset_number;
  bool unsigned_flag;
} BINLOG_USER_VAR_EVENT;
#define RP_LOCK_LOG_IS_ALREADY_LOCKED 1
#define RP_FORCE_ROTATE 2
/*
  The COPY_INFO structure is used by the INSERT/REPLACE code.

  How rows are counted by the INSERT / INSERT ... ON DUPLICATE KEY UPDATE
  code:
  If a row is inserted, the 'copied' variable is incremented.
  If a row is updated by INSERT ... ON DUPLICATE KEY UPDATE and the new
  data differs from the old, both the 'copied' and the 'updated' variables
  are incremented.
  The 'touched' variable is incremented whenever a row is touched by the
  update part of INSERT ... ON DUPLICATE KEY UPDATE, no matter whether the
  row was actually changed or not.
*/
typedef struct st_copy_info {
  ha_rows records;      /**< Number of processed records */
  ha_rows deleted;      /**< Number of deleted records */
  ha_rows updated;      /**< Number of updated records */
  ha_rows copied;       /**< Number of copied records */
  ha_rows error_count;
  ha_rows touched;      /**< Number of touched records */
  enum enum_duplicates handle_duplicates;
  int escape_char, last_errno;
  bool ignore;
  /* for INSERT ... UPDATE */
  List<Item> *update_fields;
  List<Item> *update_values;
  /* for VIEW ... WITH CHECK OPTION */
  TABLE_LIST *view;
} COPY_INFO;
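
/*
  Worked example (illustrative): with a unique key on k and an existing
  row (1,'a'), the statement

    INSERT INTO t (k,v) VALUES (1,'a'),(1,'b'),(2,'c')
      ON DUPLICATE KEY UPDATE v=VALUES(v);

  drives the counters above as follows:
    row (1,'a'): duplicate, new data equals old -> touched= 1
    row (1,'b'): duplicate, new data differs    -> touched= 2, copied= 1,
                                                   updated= 1
    row (2,'c'): inserted                       -> copied= 2
*/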
class Key_part_spec :public Sql_alloc {
public:
  LEX_STRING field_name;
  uint length;
  Key_part_spec(const LEX_STRING &name, uint len)
    : field_name(name), length(len)
  {}
  Key_part_spec(const char *name, const size_t name_len, uint len)
    : length(len)
  { field_name.str= (char *)name; field_name.length= name_len; }
  bool operator==(const Key_part_spec& other) const;
  /**
    Construct a copy of this Key_part_spec. field_name is copied
    by-pointer as it is known to never change. At the same time
    'length' may be reset in mysql_prepare_create_table, and this
    is why we supply it with a copy.

    @return If out of memory, 0 is returned and an error is set in
    THD.
  */
  Key_part_spec *clone(MEM_ROOT *mem_root) const
  { return new (mem_root) Key_part_spec(*this); }
};
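
/*
  Usage sketch (illustrative; copy_key_parts and stmt_root are hypothetical
  names): the clone(MEM_ROOT*) methods in this and the following classes
  exist so that statement re-execution can work on a private copy of the
  parser output, allocated on the statement's own memory root:

    static bool copy_key_parts(List<Key_part_spec> &src,
                               MEM_ROOT *stmt_root,
                               List<Key_part_spec> *dst)
    {
      List_iterator<Key_part_spec> it(src);
      Key_part_spec *part;
      while ((part= it++))
      {
        Key_part_spec *copy= part->clone(stmt_root);
        if (copy == NULL || dst->push_back(copy))
          return true;                        // out of memory
      }
      return false;
    }
*/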
class Alter_drop :public Sql_alloc {
public:
  enum drop_type { KEY, COLUMN };
  const char *name;
  enum drop_type type;
  Alter_drop(enum drop_type par_type, const char *par_name)
    :name(par_name), type(par_type) {}
  /**
    Used to make a clone of this object for ALTER/CREATE TABLE
    @sa comment for Key_part_spec::clone
  */
  Alter_drop *clone(MEM_ROOT *mem_root) const
  { return new (mem_root) Alter_drop(*this); }
};
class Alter_column :public Sql_alloc {
public:
  const char *name;
  Item *def;
  Alter_column(const char *par_name, Item *literal)
    :name(par_name), def(literal) {}
  /**
    Used to make a clone of this object for ALTER/CREATE TABLE
    @sa comment for Key_part_spec::clone
  */
  Alter_column *clone(MEM_ROOT *mem_root) const
  { return new (mem_root) Alter_column(*this); }
};
class Key :public Sql_alloc {
public:
  enum Keytype { PRIMARY, UNIQUE, MULTIPLE, FULLTEXT, SPATIAL, FOREIGN_KEY };
  enum Keytype type;
  KEY_CREATE_INFO key_create_info;
  List<Key_part_spec> columns;
  LEX_STRING name;
  bool generated;
  Key(enum Keytype type_par, const LEX_STRING &name_arg,
      KEY_CREATE_INFO *key_info_arg,
      bool generated_arg, List<Key_part_spec> &cols)
    :type(type_par), key_create_info(*key_info_arg), columns(cols),
     name(name_arg), generated(generated_arg)
  {}
  Key(enum Keytype type_par, const char *name_arg, size_t name_len_arg,
      KEY_CREATE_INFO *key_info_arg, bool generated_arg,
      List<Key_part_spec> &cols)
    :type(type_par), key_create_info(*key_info_arg), columns(cols),
     generated(generated_arg)
  {
    name.str= (char *)name_arg;
    name.length= name_len_arg;
  }
  Key(const Key &rhs, MEM_ROOT *mem_root);
  virtual ~Key() {}
  /* Equality comparison of keys (ignoring name) */
  friend bool foreign_key_prefix(Key *a, Key *b);
  /**
    Used to make a clone of this object for ALTER/CREATE TABLE
    @sa comment for Key_part_spec::clone
  */
  virtual Key *clone(MEM_ROOT *mem_root) const
  { return new (mem_root) Key(*this, mem_root); }
};
class Table_ident;
class Foreign_key: public Key {
public:
  enum fk_match_opt { FK_MATCH_UNDEF, FK_MATCH_FULL,
                      FK_MATCH_PARTIAL, FK_MATCH_SIMPLE };
  enum fk_option { FK_OPTION_UNDEF, FK_OPTION_RESTRICT, FK_OPTION_CASCADE,
                   FK_OPTION_SET_NULL, FK_OPTION_NO_ACTION,
                   FK_OPTION_DEFAULT };
  Table_ident *ref_table;
  List<Key_part_spec> ref_columns;
  uint delete_opt, update_opt, match_opt;
  Foreign_key(const LEX_STRING &name_arg, List<Key_part_spec> &cols,
              Table_ident *table, List<Key_part_spec> &ref_cols,
              uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg)
    :Key(FOREIGN_KEY, name_arg, &default_key_create_info, 0, cols),
     ref_table(table), ref_columns(ref_cols),
     delete_opt(delete_opt_arg), update_opt(update_opt_arg),
     match_opt(match_opt_arg)
  {}
  Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root);
  /**
    Used to make a clone of this object for ALTER/CREATE TABLE
    @sa comment for Key_part_spec::clone
  */
  virtual Key *clone(MEM_ROOT *mem_root) const
  { return new (mem_root) Foreign_key(*this, mem_root); }
};
typedef struct st_mysql_lock
{
  TABLE **table;
  uint table_count, lock_count;
  THR_LOCK_DATA **locks;
} MYSQL_LOCK;
class LEX_COLUMN : public Sql_alloc
{
public:
  String column;
  uint rights;
  LEX_COLUMN(const String& x, const uint& y): column(x), rights(y) {}
};
class MY_LOCALE;
/**
  Query_cache_tls -- query cache thread local data.
*/
struct Query_cache_block;
struct Query_cache_tls
{
  /*
    'first_query_block' should be accessed only via query cache
    functions and methods to maintain proper locking.
  */
  Query_cache_block *first_query_block;

  void set_first_query_block(Query_cache_block *first_query_block_arg)
  {
    first_query_block= first_query_block_arg;
  }

  Query_cache_tls() :first_query_block(NULL) {}
};
/* SIGNAL / RESIGNAL / GET DIAGNOSTICS */
/**
  This enumeration lists all the condition item names of a condition in
  the SQL condition area.
*/
typedef enum enum_diag_condition_item_name
{
/*
Conditions that can be set by the user (SIGNAL/RESIGNAL),
and by the server implementation.
*/
DIAG_CLASS_ORIGIN= 0,
FIRST_DIAG_SET_PROPERTY= DIAG_CLASS_ORIGIN,
DIAG_SUBCLASS_ORIGIN= 1,
DIAG_CONSTRAINT_CATALOG= 2,
DIAG_CONSTRAINT_SCHEMA= 3,
DIAG_CONSTRAINT_NAME= 4,
DIAG_CATALOG_NAME= 5,
DIAG_SCHEMA_NAME= 6,
DIAG_TABLE_NAME= 7,
DIAG_COLUMN_NAME= 8,
DIAG_CURSOR_NAME= 9,
DIAG_MESSAGE_TEXT= 10,
DIAG_MYSQL_ERRNO= 11,
LAST_DIAG_SET_PROPERTY= DIAG_MYSQL_ERRNO
} Diag_condition_item_name;
/**
Name of each diagnostic condition item.
This array is indexed by Diag_condition_item_name.
*/
extern const LEX_STRING Diag_condition_item_names[];
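/*
  Illustrative sketch (not part of the original header): because the array
  above is indexed by Diag_condition_item_name, mapping a condition item to
  its SQL name is a plain bounds-checked array lookup. The helper name is
  invented for this example; the bounds mirror the FIRST_/LAST_ markers in
  the enum.
*/
static inline const LEX_STRING *
diag_condition_item_name_str(Diag_condition_item_name name)
{
  DBUG_ASSERT(name >= FIRST_DIAG_SET_PROPERTY &&
              name <= LAST_DIAG_SET_PROPERTY);
  return &Diag_condition_item_names[name];
}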
#include "sql_lex.h" /* Must be here */
class Delayed_insert;
class select_result;
class Time_zone;
#define THD_SENTRY_MAGIC 0xfeedd1ff
#define THD_SENTRY_GONE 0xdeadbeef
#define THD_CHECK_SENTRY(thd) DBUG_ASSERT(thd->dbug_sentry == THD_SENTRY_MAGIC)
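/*
  A condensed sketch of the sentry idiom these macros implement
  (illustrative only; 'Sentry_demo' is a hypothetical type, not part of the
  server): the owner stamps a magic value on construction and overwrites it
  on destruction, so that THD_CHECK_SENTRY() turns any later use of a freed
  object into a debug assertion instead of silent memory corruption.
*/
struct Sentry_demo
{
  uint dbug_sentry;
  Sentry_demo()  { dbug_sentry= THD_SENTRY_MAGIC; }
  ~Sentry_demo() { dbug_sentry= THD_SENTRY_GONE; }
};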
typedef struct system_variables
{
/*
How dynamically allocated system variables are handled:
The global_system_variables and max_system_variables are "authoritative":
they should both have the same 'version' and 'size'.
When a dynamic variable is accessed and the session version is out of
date, the session buffer is reallocated if necessary and the missing
bytes are copied from the global buffer (see the sketch after this
struct).
*/
ulong dynamic_variables_version;
char* dynamic_variables_ptr;
uint dynamic_variables_head; /* largest valid variable offset */
uint dynamic_variables_size; /* how many bytes are in use */
ulonglong max_heap_table_size;
ulonglong tmp_table_size;
ulonglong long_query_time;
ulonglong optimizer_switch;
ulonglong sql_mode; ///< which non-standard SQL behaviour should be enabled
ulonglong option_bits; ///< OPTION_xxx constants, e.g. OPTION_PROFILING
ha_rows select_limit;
ha_rows max_join_size;
ulong auto_increment_increment, auto_increment_offset;
ulong bulk_insert_buff_size;
ulong join_buff_size;
ulong lock_wait_timeout;
ulong max_allowed_packet;
ulong max_error_count;
ulong max_length_for_sort_data;
ulong max_sort_length;
ulong max_tmp_tables;
ulong max_insert_delayed_threads;
ulong min_examined_row_limit;
ulong multi_range_count;
ulong net_buffer_length;
ulong net_interactive_timeout;
ulong net_read_timeout;
ulong net_retry_count;
ulong net_wait_timeout;
ulong net_write_timeout;
ulong optimizer_prune_level;
ulong optimizer_search_depth;
ulong preload_buff_size;
ulong profiling_history_size;
ulong read_buff_size;
ulong read_rnd_buff_size;
ulong div_precincrement;
ulong sortbuff_size;
ulong max_sp_recursion_depth;
ulong default_week_format;
ulong max_seeks_for_key;
ulong range_alloc_block_size;
ulong query_alloc_block_size;
ulong query_prealloc_size;
ulong trans_alloc_block_size;
ulong trans_prealloc_size;
ulong log_warnings;
ulong group_concat_max_len;
uint binlog_format; ///< binlog format for this thd (see enum_binlog_format)
my_bool binlog_direct_non_trans_update;
my_bool sql_log_bin;
uint completion_type;
uint query_cache_type;
uint tx_isolation;
uint updatable_views_with_limit;
uint max_user_connections;
/**
  In a slave thread we need to know on behalf of which thread the query
  is being run, to replicate temporary tables properly.
*/
my_thread_id pseudo_thread_id;
my_bool low_priority_updates;
my_bool new_mode;
my_bool query_cache_wlock_invalidate;
my_bool engine_condition_pushdown;
my_bool keep_files_on_create;
my_bool old_alter_table;
my_bool old_passwords;
my_bool big_tables;
plugin_ref table_plugin;
/* Only the charset part of these variables is meaningful */
CHARSET_INFO *character_set_filesystem;
CHARSET_INFO *character_set_client;
CHARSET_INFO *character_set_results;
/* Both charset and collation parts of these variables are important */
CHARSET_INFO *collation_server;
CHARSET_INFO *collation_database;
CHARSET_INFO *collation_connection;
/* Error messages */
MY_LOCALE *lc_messages;
/* Locale Support */
MY_LOCALE *lc_time_names;
Time_zone *time_zone;
my_bool sysdate_is_now;
double long_query_time_double;
} SV;
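/*
  A minimal sketch of the dynamic-variable sync protocol described at the
  top of struct system_variables. Illustrative only: the helper name is
  invented, 'global_system_variables' is assumed to be the usual extern
  global instance, and the real server code additionally holds
  LOCK_global_system_variables and fixes up per-variable string pointers.
*/
static inline void
sync_dynamic_variables_sketch(struct system_variables *vars)
{
  if (vars->dynamic_variables_version ==
      global_system_variables.dynamic_variables_version)
    return;                                 /* session is up to date */
  if (vars->dynamic_variables_size <
      global_system_variables.dynamic_variables_size)
  {
    /* Grow the session buffer, then copy only the tail it was missing. */
    vars->dynamic_variables_ptr=
      (char*) my_realloc(vars->dynamic_variables_ptr,
                         global_system_variables.dynamic_variables_size,
                         MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR));
    memcpy(vars->dynamic_variables_ptr + vars->dynamic_variables_size,
           global_system_variables.dynamic_variables_ptr +
             vars->dynamic_variables_size,
           global_system_variables.dynamic_variables_size -
             vars->dynamic_variables_size);
    vars->dynamic_variables_size=
      global_system_variables.dynamic_variables_size;
  }
  vars->dynamic_variables_version=
    global_system_variables.dynamic_variables_version;
}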
/**
  Per-thread status variables.
  Every member up to last_system_status_var must be a long/ulong so that
  add_to_status()/add_diff_to_status() can work.
*/
typedef struct system_status_var
{
ulong com_other;
ulong com_stat[(uint) SQLCOM_END];
ulong created_tmp_disk_tables;
ulong created_tmp_tables;
ulong ha_commit_count;
ulong ha_delete_count;
ulong ha_read_first_count;
ulong ha_read_last_count;
ulong ha_read_key_count;
ulong ha_read_next_count;
ulong ha_read_prev_count;
ulong ha_read_rnd_count;
ulong ha_read_rnd_next_count;
ulong ha_rollback_count;
ulong ha_update_count;
ulong ha_write_count;
ulong ha_prepare_count;
ulong ha_discover_count;
ulong ha_savepoint_count;
ulong ha_savepoint_rollback_count;
/* KEY_CACHE parts. These are copies of the original */
ulong key_blocks_changed;
ulong key_blocks_used;
ulong key_cache_r_requests;
ulong key_cache_read;
ulong key_cache_w_requests;
ulong key_cache_write;
/* END OF KEY_CACHE parts */
ulong net_big_packet_count;
ulong opened_tables;
ulong opened_shares;
ulong select_full_join_count;
ulong select_full_range_join_count;
ulong select_range_count;
ulong select_range_check_count;
ulong select_scan_count;
ulong long_query_count;
ulong filesort_merge_passes;
ulong filesort_range_count;
ulong filesort_rows;
ulong filesort_scan_count;
/* Prepared statements and binary protocol */
ulong com_stmt_prepare;
ulong com_stmt_reprepare;
ulong com_stmt_execute;
ulong com_stmt_send_long_data;
ulong com_stmt_fetch;
ulong com_stmt_reset;
ulong com_stmt_close;
/*
Number of statements sent from the client
*/
ulong questions;
ulonglong bytes_received;
ulonglong bytes_sent;
/*
IMPORTANT!
SEE last_system_status_var DEFINITION BELOW.
Below 'last_system_status_var' are all variables that cannot be handled
automatically by add_to_status()/add_diff_to_status().
*/
double last_query_cost;
} STATUS_VAR;
/*
  This is used for 'SHOW STATUS'. It must be set to the last ulong
  variable in system_status_var that makes sense to add to the global
  counters.
*/
#define last_system_status_var questions
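/*
  Sketch of the flat-array aggregation this marker enables (the server's
  real add_to_status() in sql_class.cc follows the same pattern; this is a
  simplified rendition under the assumption that offsetof() is available
  via the usual portability headers). Everything from the start of
  STATUS_VAR up to and including 'questions' is a ulong, so the whole
  prefix can be summed as one array without naming each counter.
*/
static inline void add_to_status_sketch(STATUS_VAR *to_var,
                                        STATUS_VAR *from_var)
{
  ulong *end= (ulong*) ((uchar*) to_var +
                        offsetof(STATUS_VAR, last_system_status_var) +
                        sizeof(ulong));
  ulong *to= (ulong*) to_var, *from= (ulong*) from_var;

  while (to != end)
    *(to++)+= *(from++);
}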
void mark_transaction_to_rollback(THD *thd, bool all);
#ifdef MYSQL_SERVER
void free_tmp_table(THD *thd, TABLE *entry);
/* The following macro is to make init of Query_arena simpler */
#ifndef DBUG_OFF
#define INIT_ARENA_DBUG_INFO is_backup_arena= 0; is_reprepared= FALSE;
#else
#define INIT_ARENA_DBUG_INFO
#endif
class Query_arena
{
public:
/*
    List of items created in the parser for this query. Every item adds
    itself to the list on creation (see Item::Item() for details).
*/
Item *free_list;
MEM_ROOT *mem_root; // Pointer to current memroot
#ifndef DBUG_OFF
bool is_backup_arena; /* True if this arena is used for backup. */
bool is_reprepared;
#endif
/*
    The state reflects three different life cycles for three
    different types of statements:
Prepared statement: INITIALIZED -> PREPARED -> EXECUTED.
Stored procedure: INITIALIZED_FOR_SP -> EXECUTED.
Other statements: CONVENTIONAL_EXECUTION never changes.
*/
enum enum_state
{
INITIALIZED= 0, INITIALIZED_FOR_SP= 1, PREPARED= 2,
CONVENTIONAL_EXECUTION= 3, EXECUTED= 4, ERROR= -1
};
enum_state state;
/* We build without RTTI, so dynamic_cast can't be used. */
enum Type
{
STATEMENT, PREPARED_STATEMENT, STORED_PROCEDURE
};
Query_arena(MEM_ROOT *mem_root_arg, enum enum_state state_arg) :
free_list(0), mem_root(mem_root_arg), state(state_arg)
{ INIT_ARENA_DBUG_INFO; }
/*
This constructor is used only when Query_arena is created as
backup storage for another instance of Query_arena.
*/
Query_arena() { INIT_ARENA_DBUG_INFO; }
virtual Type type() const;
virtual ~Query_arena() {};
inline bool is_stmt_prepare() const { return state == INITIALIZED; }
inline bool is_first_sp_execute() const
{ return state == INITIALIZED_FOR_SP; }
inline bool is_stmt_prepare_or_first_sp_execute() const
{ return (int)state < (int)PREPARED; }
inline bool is_stmt_prepare_or_first_stmt_execute() const
{ return (int)state <= (int)PREPARED; }
inline bool is_first_stmt_execute() const { return state == PREPARED; }
inline bool is_stmt_execute() const
{ return state == PREPARED || state == EXECUTED; }
inline bool is_conventional() const
{ return state == CONVENTIONAL_EXECUTION; }
inline void* alloc(size_t size) { return alloc_root(mem_root,size); }
inline void* calloc(size_t size)
{
void *ptr;
if ((ptr=alloc_root(mem_root,size)))
bzero(ptr, size);
return ptr;
}
inline char *strdup(const char *str)
{ return strdup_root(mem_root,str); }
inline char *strmake(const char *str, size_t size)
{ return strmake_root(mem_root,str,size); }
inline void *memdup(const void *str, size_t size)
{ return memdup_root(mem_root,str,size); }
inline void *memdup_w_gap(const void *str, size_t size, uint gap)
{
void *ptr;
if ((ptr= alloc_root(mem_root,size+gap)))
memcpy(ptr,str,size);
return ptr;
}
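  /*
    Usage sketch (illustrative, not part of the original header):
    memdup_w_gap() copies 'size' bytes into arena memory and reserves
    'gap' extra bytes after the copy, e.g. for a caller-added terminator:

      char *copy= (char*) arena->memdup_w_gap(src, len, 1);
      if (copy)
        copy[len]= '\0';   // write into the reserved gap byte

    'arena', 'src' and 'len' are hypothetical names.
  */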
void set_query_arena(Query_arena *set);
void free_items();
/* Close the active state associated with execution of this statement */
virtual void cleanup_stmt();
};
class Server_side_cursor;
/**
  @class Statement
  @brief State of a single command executed against this connection.

  One connection can contain many simultaneously running statements,
  some of which could be:
   - prepared, that is, contain placeholders,
   - opened as cursors. We maintain a 1:1 relationship between
     statement and cursor - if the user wants to create another cursor
     for the same query, we create another statement for it.
  To perform some action with a statement we reset the THD part to the
  state of that statement, do the action, and then save the modified state
  back from the THD into the statement. This will change in the near
  future, and Statement will then be used explicitly. A usage sketch of
  the backup/restore mechanism follows the class definition below.
*/
class Statement: public ilink, public Query_arena
{
Statement(const Statement &rhs); /* not implemented: */
Statement &operator=(const Statement &rhs); /* non-copyable */
public:
/*
  Uniquely identifies each statement object in thread scope; changes during
  the statement's lifetime. FIXME: must be const
*/
ulong id;
/*
  MARK_COLUMNS_NONE:  Means mark_used_columns is not set and no indicator
                      of used fields is passed to the handler.
  MARK_COLUMNS_READ:  Means a bit in the read set is set to inform the
                      handler that the field is to be read. If the field
                      list contains duplicates, then thd->dup_field is set
                      to point to the last found duplicate.
  MARK_COLUMNS_WRITE: Means a bit is set in the write set to inform the
                      handler that it needs to update this field in
                      write_row and update_row.
  (A usage sketch follows the declaration below.)
*/
enum enum_mark_columns mark_used_columns;
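  /*
    Usage sketch (an assumption, not from the original header): field
    resolution code switches the marking mode around fix_fields() so the
    right bitmap is updated, e.g. for a read-only context:

      enum enum_mark_columns save_mark= thd->mark_used_columns;
      thd->mark_used_columns= MARK_COLUMNS_READ;
      ... resolve fields: bits get set in table->read_set ...
      thd->mark_used_columns= save_mark;

    'save_mark' is a hypothetical name.
  */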
LEX_STRING name; /* name for named prepared statements */
LEX *lex; // parse tree descriptor
/*
  Points to the query associated with this statement. It's const, but
  we need to declare it char * because all table handlers are written
  in C and need to point to it.

  Note that if we set query = NULL, we must at the same time set
  query_length = 0, and protect the whole operation with the
  LOCK_thd_data mutex. To avoid crashes in races, if we do not
  know that thd->query cannot change at the moment, we should print
  thd->query like this:
    (1) acquire the LOCK_thd_data mutex;
    (2) print or copy the value of query and query_length;
    (3) release the LOCK_thd_data mutex.
  This printing is needed at least in SHOW PROCESSLIST and SHOW
  ENGINE INNODB STATUS. (See the sketch after set_query_inner() below.)
*/
LEX_STRING query_string;
Server_side_cursor *cursor;
inline char *query() { return query_string.str; }
inline uint32 query_length() { return query_string.length; }
void set_query_inner(char *query_arg, uint32 query_length_arg);
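  /*
    Sketch of the locking protocol described above (illustrative only;
    print_query() is a hypothetical stand-in):

      mysql_mutex_lock(&thd->LOCK_thd_data);      // (1) acquire the mutex
      if (thd->query())
        print_query(thd->query(), thd->query_length());  // (2) print/copy
      mysql_mutex_unlock(&thd->LOCK_thd_data);    // (3) release the mutex
  */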
/**
  Name of the current (default) database.

  If there is a current (default) database, "db" contains its name and
  "db_length" its length. If there is no current (default) database,
  "db" is NULL and "db_length" is 0. In other words, "db" must either be
  NULL or contain a valid database name.

  @note this attribute is set and allocated by the slave SQL thread (for
  the THD of that thread); that thread is (and must remain, for now) the
  only one responsible for freeing this member.
*/
char *db;
size_t db_length;
public:
/* This constructor is called for backup statements */
Statement() {}
Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg,
enum enum_state state_arg, ulong id_arg);
virtual ~Statement();
/* Assign execution context (note: not all members) of given stmt to self */
virtual void set_statement(Statement *stmt);
void set_n_backup_statement(Statement *stmt, Statement *backup);
void restore_backup_statement(Statement *stmt, Statement *backup);
/* return class type */
virtual Type type() const;
};
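
/*
  Usage sketch (an assumption, not from the original header): the
  backup/restore pair above is used to temporarily make this THD execute
  another statement:

    Statement backup;
    thd->set_n_backup_statement(stmt, &backup);    // THD now runs 'stmt'
    ... do work on behalf of 'stmt' ...
    thd->restore_backup_statement(stmt, &backup);  // THD state restored

  'stmt' is a hypothetical Statement pointer.
*/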
/**
  Container for all statements created/used in a connection.

  Statements in Statement_map have a unique Statement::id (guaranteed by
  the id assignment in Statement::Statement).
  Non-empty statement names are unique too: an attempt to insert a new
  statement with a duplicate name causes the older statement to be deleted.
  Statements are auto-deleted when they are removed from the map and when
  the map is deleted. (A lookup sketch follows the class definition.)
*/
class Statement_map
{
public:
Statement_map();
int insert(THD *thd, Statement *statement);
Statement *find_by_name(LEX_STRING *name)
{
Statement *stmt;
stmt= (Statement*)my_hash_search(&names_hash, (uchar*)name->str,
name->length);
return stmt;
}
Statement *find(ulong id)
{
  if (last_found_statement == 0 || id != last_found_statement->id)
  {
    Statement *stmt;
    stmt= (Statement *) my_hash_search(&st_hash, (uchar *) &id, sizeof(id));
    /* A named statement must be looked up via find_by_name(), not by id */
    if (stmt && stmt->name.str)
      return NULL;
    last_found_statement= stmt;
  }
  return last_found_statement;
}
/*
Close all cursors of this connection that use tables of a storage
engine that has transaction-specific state and therefore can not
survive COMMIT or ROLLBACK. Currently all but MyISAM cursors are closed.
*/
void close_transient_cursors();
void erase(Statement *statement);
/* Erase all statements (calls Statement destructor) */
void reset();
~Statement_map();
private:
HASH st_hash;
HASH names_hash;
I_List<Statement> transient_cursor_list;
Statement *last_found_statement;
};
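
/*
  Usage sketch (an assumption, not from the original header): a typical
  lookup of a prepared statement by the id sent in a client command:

    Statement *stmt= thd->stmt_map.find(stmt_id);
    if (stmt == NULL)
      ... report an unknown statement handler error ...

  'stmt_map' and 'stmt_id' are hypothetical names here.
*/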
struct st_savepoint {
struct st_savepoint *prev;
char *name;
uint length;
Ha_trx_info *ha_list;
/** Last acquired lock before this savepoint was set. */
MDL_ticket *mdl_savepoint;
};
enum xa_states {XA_NOTR=0, XA_ACTIVE, XA_IDLE, XA_PREPARED, XA_ROLLBACK_ONLY};
extern const char *xa_state_names[];
typedef struct st_xid_state {
/* For now, this is only used to catch duplicated external xids */
XID xid; // transaction identifier
enum xa_states xa_state; // used by external XA only
bool in_thd;
/* Error reported by the Resource Manager (RM) to the Transaction Manager. */
uint rm_error;
} XID_STATE;
extern mysql_mutex_t LOCK_xid_cache;
extern HASH xid_cache;
bool xid_cache_init(void);
void xid_cache_free(void);
XID_STATE *xid_cache_search(XID *xid);
bool xid_cache_insert(XID *xid, enum xa_states xa_state);
bool xid_cache_insert(XID_STATE *xid_state);
void xid_cache_delete(XID_STATE *xid_state);
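
/*
  Usage sketch (an assumption, not from the original header): an XA START
  handler would first check the cache for a duplicate external xid and
  then register the new one:

    if (xid_cache_search(xid))
      ... duplicate xid: report an error ...
    else
      xid_cache_insert(xid, XA_ACTIVE);

  'xid' is a hypothetical XID pointer; the cache itself is protected by
  LOCK_xid_cache.
*/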
/**
@class Security_context
@brief A set of THD members describing the current authenticated user.
*/
class Security_context {
public:
Security_context() {} /* Remove gcc warning */
/*
host - host of the client
user - user of the client, set to NULL until the user has been read from
the connection
priv_user - The user privilege we are using. May be "" for anonymous user.
ip - client IP
*/
char *host, *user, *priv_user, *ip;
/* The host privilege we are using */
char priv_host[MAX_HOSTNAME];
/* points to host if host is available, otherwise points to ip */
const char *host_or_ip;
ulong master_access; /* Global privileges from mysql.user */
ulong db_access; /* Privileges for current db */
void init();
void destroy();
void skip_grants();
inline char *priv_host_name()
{
return (*priv_host ? priv_host : (char *)"%");
}
bool set_user(char *user_arg);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
bool
change_security_context(THD *thd,
LEX_STRING *definer_user,
LEX_STRING *definer_host,
LEX_STRING *db,
Security_context **backup);
void
restore_security_context(THD *thd, Security_context *backup);
#endif
bool user_matches(Security_context *);
};
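
/*
  A minimal usage sketch: switching to a routine definer's security
  context and back with the methods declared above. The name 'sctx'
  (assumed to be thd->security_ctx) and the definer/db LEX_STRINGs are
  illustrative and supplied by the caller; TRUE as the failure return
  value is also an assumption here.

    Security_context *backup= NULL;
    if (sctx->change_security_context(thd, &definer_user, &definer_host,
                                      &db, &backup))
      return TRUE;        // assumed: definer not found, error reported
    ...                   // run with the definer's privileges
    sctx->restore_security_context(thd, backup);
*/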
/**
A registry for item tree transformations performed during
  query optimization. We register only those changes which must be
  rolled back before a prepared statement or stored procedure can be
  re-executed.
*/
struct Item_change_record;
typedef I_List<Item_change_record> Item_change_list;
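
/*
  Sketch of how a transformation uses this registry, assuming
  THD::change_item_tree() records a change and
  THD::rollback_item_tree_changes() undoes all recorded changes at
  statement cleanup (both method names are assumptions here):

    thd->change_item_tree(&select_lex->where, simplified_cond);
    ...
    thd->rollback_item_tree_changes();   // restore the original tree
*/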
/**
Type of locked tables mode.
See comment for THD::locked_tables_mode for complete description.
*/
enum enum_locked_tables_mode
{
LTM_NONE= 0,
LTM_LOCK_TABLES,
LTM_PRELOCKED,
LTM_PRELOCKED_UNDER_LOCK_TABLES
};
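
/*
  Code that must keep tables open across statements typically branches
  on this mode, e.g. (THD inherits locked_tables_mode from
  Open_tables_state, declared below):

    if (thd->locked_tables_mode != LTM_NONE)
      ;   // LOCK TABLES or prelocked mode: don't close tables here
*/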
/**
Class that holds information about tables which were opened and locked
by the thread. It is also used to save/restore this information in
push_open_tables_state()/pop_open_tables_state().
*/
class Open_tables_state
{
public:
/**
As part of class THD, this member is set during execution
of a prepared statement. When it is set, it is used
by the locking subsystem to report a change in table metadata.
When Open_tables_state part of THD is reset to open
a system or INFORMATION_SCHEMA table, the member is cleared
to avoid spurious ER_NEED_REPREPARE errors -- system and
INFORMATION_SCHEMA tables are not subject to metadata version
tracking.
@sa check_and_update_table_version()
*/
Reprepare_observer *m_reprepare_observer;
/**
List of regular tables in use by this thread. Contains temporary and
base tables that were opened with @see open_tables().
*/
TABLE *open_tables;
/**
List of temporary tables used by this thread. Contains user-level
temporary tables, created with CREATE TEMPORARY TABLE, and
internal temporary tables, created, e.g., to resolve a SELECT,
or for an intermediate table used in ALTER.
XXX Why are internal temporary tables added to this list?
*/
TABLE *temporary_tables;
TABLE *derived_tables;
/*
During a MySQL session, one can lock tables in two modes: automatic
or manual. In automatic mode all necessary tables are locked just before
statement execution, and all acquired locks are stored in 'lock'
member. Unlocking takes place automatically as well, when the
statement ends.
Manual mode comes into play when a user issues a 'LOCK TABLES'
statement. In this mode the user can only use the locked tables.
Trying to use any other tables will give an error.
    The locked tables are also stored in this member; in addition,
    thd->locked_tables_mode is set to a non-LTM_NONE value. Manual
    locking is described in
the 'LOCK_TABLES' chapter of the MySQL manual.
See also lock_tables() for details.
*/
MYSQL_LOCK *lock;
/*
CREATE-SELECT keeps an extra lock for the table being
created. This field is used to keep the extra lock available for
lower level routines, which would otherwise miss that lock.
*/
MYSQL_LOCK *extra_lock;
/*
Enum enum_locked_tables_mode and locked_tables_mode member are
used to indicate whether the so-called "locked tables mode" is on,
and what kind of mode is active.
Locked tables mode is used when it's necessary to open and
lock many tables at once, for usage across multiple
(sub-)statements.
This may be necessary either for queries that use stored functions
and triggers, in which case the statements inside functions and
triggers may be executed many times, or for implementation of
LOCK TABLES, in which case the opened tables are reused by all
subsequent statements until a call to UNLOCK TABLES.
The kind of locked tables mode employed for stored functions and
triggers is also called "prelocked mode".
    In this mode, the first open_tables() call for a statement
    analyses all functions used by the statement and adds all
    indirectly used tables to the list of tables to open and lock.
    It also marks the parse tree of the statement as requiring
    prelocking. After that, lock_tables() locks the entire list
    of tables and changes THD::locked_tables_mode to LTM_PRELOCKED.
All statements executed inside functions or triggers
use the prelocked tables, instead of opening their own ones.
Prelocked mode is turned off automatically once close_thread_tables()
of the main statement is called.
*/
enum enum_locked_tables_mode locked_tables_mode;
uint current_tablenr;
enum enum_flags {
BACKUPS_AVAIL = (1U << 0) /* There are backups available */
};
/*
Flags with information about the open tables state.
*/
uint state_flags;
/**
    This constructor initializes an Open_tables_state instance which can
    only be used as backup storage. To prepare an Open_tables_state
    instance for operations which open/lock/close tables (e.g.
    open_table()), one has to call init_open_tables_state().
*/
Open_tables_state() : state_flags(0U) { }
void set_open_tables_state(Open_tables_state *state)
{
*this= *state;
}
void reset_open_tables_state(THD *thd)
{
open_tables= temporary_tables= derived_tables= 0;
extra_lock= lock= 0;
locked_tables_mode= LTM_NONE;
state_flags= 0U;
m_reprepare_observer= NULL;
}
};
/**
Storage for backup of Open_tables_state. Must
be used only to open system tables (TABLE_CATEGORY_SYSTEM
and TABLE_CATEGORY_LOG).
*/
class Open_tables_backup: public Open_tables_state
{
public:
/**
When we backup the open tables state to open a system
table or tables, points at the last metadata lock
acquired before the backup. Is used to release
metadata locks on system tables after they are
no longer used.
*/
MDL_ticket *mdl_system_tables_svp;
};
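
/*
  Sketch of the save/restore pattern these two classes support, using
  set_open_tables_state()/reset_open_tables_state() declared above
  (THD is an Open_tables_state; the surrounding flow is a
  simplification):

    Open_tables_backup backup;
    backup.set_open_tables_state(thd);     // save the current state
    thd->reset_open_tables_state(thd);     // start from a clean state
    ...                                    // open and use a system table
    thd->set_open_tables_state(&backup);   // restore the caller's state
*/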
/**
@class Sub_statement_state
@brief Used to save context when executing a function or trigger
*/
/* Defines used for Sub_statement_state::in_sub_stmt */
#define SUB_STMT_TRIGGER 1
#define SUB_STMT_FUNCTION 2
class Sub_statement_state
{
public:
ulonglong option_bits;
ulonglong first_successful_insert_id_in_prev_stmt;
ulonglong first_successful_insert_id_in_cur_stmt, insert_id_for_cur_row;
Discrete_interval auto_inc_interval_for_cur_row;
Discrete_intervals_list auto_inc_intervals_forced;
ulonglong limit_found_rows;
ha_rows cuted_fields, sent_row_count, examined_row_count;
ulong client_capabilities;
uint in_sub_stmt;
bool enable_slow_log;
bool last_insert_id_used;
SAVEPOINT *savepoints;
enum enum_check_fields count_cuted_fields;
};
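
/*
  Sketch of how trigger/function execution saves and restores this
  state, assuming THD::reset_sub_statement_state() and
  THD::restore_sub_statement_state() are the entry points (both names
  are assumptions here):

    Sub_statement_state backup;
    thd->reset_sub_statement_state(&backup, SUB_STMT_TRIGGER);
    ...                                    // execute the trigger body
    thd->restore_sub_statement_state(&backup);
*/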
/* Flags for the THD::system_thread variable */
enum enum_thread_type
{
NON_SYSTEM_THREAD= 0,
SYSTEM_THREAD_DELAYED_INSERT= 1,
SYSTEM_THREAD_SLAVE_IO= 2,
SYSTEM_THREAD_SLAVE_SQL= 4,
SYSTEM_THREAD_NDBCLUSTER_BINLOG= 8,
SYSTEM_THREAD_EVENT_SCHEDULER= 16,
SYSTEM_THREAD_EVENT_WORKER= 32
};
inline char const *
show_system_thread(enum_thread_type thread)
{
#define RETURN_NAME_AS_STRING(NAME) case (NAME): return #NAME
switch (thread) {
static char buf[64];
RETURN_NAME_AS_STRING(NON_SYSTEM_THREAD);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_DELAYED_INSERT);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_IO);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_SQL);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_NDBCLUSTER_BINLOG);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_SCHEDULER);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_WORKER);
default:
sprintf(buf, "<UNKNOWN SYSTEM THREAD: %d>", thread);
return buf;
}
#undef RETURN_NAME_AS_STRING
}
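
/*
  Typical debug-only use; note that the static buffer in the default
  branch makes the function non-reentrant for unknown thread types:

    DBUG_PRINT("info", ("system thread: %s",
                        show_system_thread(thd->system_thread)));
*/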
/**
This class represents the interface for internal error handlers.
Internal error handlers are exception handlers used by the server
implementation.
*/
class Internal_error_handler
{
protected:
Internal_error_handler() :
m_prev_internal_handler(NULL)
{}
virtual ~Internal_error_handler() {}
public:
/**
    Handle an SQL condition.
This method can be implemented by a subclass to achieve any of the
following:
- mask a warning/error internally, prevent exposing it to the user,
- mask a warning/error and throw another one instead.
    When this method returns true, the SQL condition is considered
'handled', and will not be propagated to upper layers.
    It is the responsibility of the code installing an internal handler
to then check for trapped conditions, and implement logic to recover
from the anticipated conditions trapped during runtime.
This mechanism is similar to C++ try/throw/catch:
    - 'try' corresponds to <code>THD::push_internal_handler()</code>,
    - 'throw' corresponds to <code>my_error()</code>,
    which invokes <code>my_message_sql()</code>,
    - 'catch' corresponds to checking how/if an internal handler was invoked,
before removing it from the exception stack with
<code>THD::pop_internal_handler()</code>.
    @param thd the calling thread
    @param sql_errno the error number of the condition raised
    @param sqlstate the SQLSTATE of the condition raised
    @param level the severity level of the condition raised
    @param msg the message text of the condition raised
    @param cond_hdl [out] the condition object, if one was allocated
    @return true if the condition is handled
*/
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
MYSQL_ERROR::enum_warning_level level,
const char* msg,
MYSQL_ERROR ** cond_hdl) = 0;
private:
Internal_error_handler *m_prev_internal_handler;
friend class THD;
};
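
/*
  Sketch of the push/trap/pop pattern described above. The handler
  class below is illustrative; its handle_condition() signature matches
  the declaration in Internal_error_handler:

    class Trap_no_such_table : public Internal_error_handler
    {
    public:
      Trap_no_such_table() : m_trapped(FALSE) {}
      bool handle_condition(THD *, uint sql_errno, const char *,
                            MYSQL_ERROR::enum_warning_level, const char *,
                            MYSQL_ERROR **cond_hdl)
      {
        *cond_hdl= NULL;
        if (sql_errno == ER_NO_SUCH_TABLE)
        {
          m_trapped= TRUE;                // handled, not shown to user
          return TRUE;
        }
        return FALSE;                     // everything else propagates
      }
      bool m_trapped;
    };

    Trap_no_such_table handler;
    thd->push_internal_handler(&handler);
    ...                                   // may raise ER_NO_SUCH_TABLE
    thd->pop_internal_handler();
    if (handler.m_trapped)
      ...                                 // recover from the condition
*/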
/**
Implements the trivial error handler which cancels all error states
  and prevents an SQLSTATE from being set.
*/
class Dummy_error_handler : public Internal_error_handler
{
public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
MYSQL_ERROR::enum_warning_level level,
const char* msg,
MYSQL_ERROR ** cond_hdl)
{
/* Ignore error */
return TRUE;
}
};
/**
This class is an internal error handler implementation for
  DROP TABLE statements. Execution of these statements may produce warnings
  that should not be exposed to the user.
This class is intended to silence such warnings.
*/
class Drop_table_error_handler : public Internal_error_handler
{
public:
Drop_table_error_handler() {}
public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
MYSQL_ERROR::enum_warning_level level,
const char* msg,
MYSQL_ERROR ** cond_hdl);
private:
};
/**
Tables that were locked with LOCK TABLES statement.
Encapsulates a list of TABLE_LIST instances for tables
locked by LOCK TABLES statement, memory root for metadata locks,
and, generally, the context of LOCK TABLES statement.
In LOCK TABLES mode, the locked tables are kept open between
statements.
Therefore, we can't allocate metadata locks on execution memory
root -- as well as tables, the locks need to stay around till
UNLOCK TABLES is called.
The locks are allocated in the memory root encapsulated in this
class.
Some SQL commands, like FLUSH TABLE or ALTER TABLE, demand that
the tables they operate on are closed, at least temporarily.
  This class encapsulates a list of TABLE_LIST instances, one
  for each base table from the LOCK TABLES list, which helps
  conveniently close the TABLEs when necessary and later reopen
  them.
Implemented in sql_base.cc
*/
class Locked_tables_list
{
private:
MEM_ROOT m_locked_tables_root;
TABLE_LIST *m_locked_tables;
TABLE_LIST **m_locked_tables_last;
/** An auxiliary array used only in reopen_tables(). */
TABLE **m_reopen_array;
/**
Count the number of tables in m_locked_tables list. We can't
rely on thd->lock->table_count because it excludes
non-transactional temporary tables. We need to know
    the exact number of TABLE objects.
*/
size_t m_locked_tables_count;
public:
Locked_tables_list()
:m_locked_tables(NULL),
m_locked_tables_last(&m_locked_tables),
m_reopen_array(NULL),
m_locked_tables_count(0)
{
init_sql_alloc(&m_locked_tables_root, MEM_ROOT_BLOCK_SIZE, 0);
}
void unlock_locked_tables(THD *thd);
~Locked_tables_list()
{
unlock_locked_tables(0);
}
bool init_locked_tables(THD *thd);
TABLE_LIST *locked_tables() { return m_locked_tables; }
void unlink_from_list(THD *thd, TABLE_LIST *table_list,
bool remove_from_locked_tables);
void unlink_all_closed_tables(THD *thd,
MYSQL_LOCK *lock,
size_t reopen_count);
bool reopen_tables(THD *thd);
};
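
/*
  Sketch of the statement flow around this class (method names are the
  ones declared above; 'thd->locked_tables_list' as the THD member name
  is an assumption):

    // LOCK TABLES ...: enter locked tables mode
    if (thd->locked_tables_list.init_locked_tables(thd))
      return TRUE;
    // FLUSH TABLES under LOCK TABLES: close, then reopen the list
    if (thd->locked_tables_list.reopen_tables(thd))
      return TRUE;
    // UNLOCK TABLES: leave locked tables mode
    thd->locked_tables_list.unlock_locked_tables(thd);
*/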
/**
Storage engine specific thread local data.
*/
struct Ha_data
{
/**
Storage engine specific thread local data.
Lifetime: one user connection.
*/
void *ha_ptr;
/**
    0: Lifetime: one statement within a transaction. If @@autocommit is
on, also represents the entire transaction.
@sa trans_register_ha()
    1: Lifetime: one transaction within a connection.
If the storage engine does not participate in a transaction,
this should not be used.
@sa trans_register_ha()
*/
Ha_trx_info ha_info[2];
/**
NULL: engine is not bound to this thread
non-NULL: engine is bound to this thread, engine shutdown forbidden
*/
plugin_ref lock;
Ha_data() :ha_ptr(NULL) {}
};
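
/*
  Sketch of how a storage engine uses its Ha_data slot, assuming the
  plugin service functions thd_set_ha_data()/thd_get_ha_data() are the
  accessors for ha_ptr ('my_hton' and the connection struct are
  illustrative):

    my_conn *conn= (my_conn*) thd_get_ha_data(thd, my_hton);
    if (conn == NULL)
    {
      conn= new my_conn();
      thd_set_ha_data(thd, my_hton, conn);  // may also pin the plugin
    }
*/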
/**
An instance of the global read lock in a connection.
Implemented in lock.cc.
*/
class Global_read_lock
{
public:
enum enum_grl_state
{
GRL_NONE,
GRL_ACQUIRED,
GRL_ACQUIRED_AND_BLOCKS_COMMIT
};
Global_read_lock()
:m_protection_count(0), m_state(GRL_NONE), m_mdl_global_shared_lock(NULL)
{}
bool lock_global_read_lock(THD *thd);
void unlock_global_read_lock(THD *thd);
bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh,
bool is_not_commit);
void start_waiting_global_read_lock(THD *thd);
bool make_global_read_lock_block_commit(THD *thd);
bool is_acquired() const { return m_state != GRL_NONE; }
bool has_protection() const { return m_protection_count > 0; }
MDL_ticket *global_shared_lock() const { return m_mdl_global_shared_lock; }
private:
uint m_protection_count; // GRL protection count
  enum_grl_state m_state;
  /**
    In order to acquire the global read lock, the connection must
    acquire a global shared metadata lock, to prohibit all DDL.
  */
  MDL_ticket *m_mdl_global_shared_lock;
};
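/*
  A rough sketch of how FLUSH TABLES WITH READ LOCK drives this class
  (simplified; assumes THD exposes a Global_read_lock member named
  global_read_lock, and omits error handling):

    if (thd->global_read_lock.lock_global_read_lock(thd))
      return TRUE;
    // FLUSH TABLES WITH READ LOCK also blocks commits:
    if (thd->global_read_lock.make_global_read_lock_block_commit(thd))
      return TRUE;
    ...
    thd->global_read_lock.unlock_global_read_lock(thd);  // on UNLOCK TABLES
*/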
extern "C" void my_message_sql(uint error, const char *str, myf MyFlags);
/**
@class THD
For each client connection we create a separate thread with THD serving as
a thread/connection descriptor
*/
class THD :public Statement,
public Open_tables_state
{
public:
MDL_context mdl_context;
/* Used to execute base64 coded binlog events in MySQL server */
Relay_log_info* rli_fake;
void reset_for_next_command();
/*
Constant for THD::where initialization in the beginning of every query.
It's needed because we do not save/restore THD::where normally during
primary (non subselect) query execution.
*/
static const char * const DEFAULT_WHERE;
#ifdef EMBEDDED_LIBRARY
struct st_mysql *mysql;
unsigned long client_stmt_id;
unsigned long client_param_count;
struct st_mysql_bind *client_params;
char *extra_data;
ulong extra_length;
struct st_mysql_data *cur_data;
struct st_mysql_data *first_data;
struct st_mysql_data **data_tail;
void clear_data_list();
struct st_mysql_data *alloc_new_dataset();
/*
In embedded server it points to the statement that is processed
in the current query. We store some results directly in statement
fields then.
*/
struct st_mysql_stmt *current_stmt;
#endif
#ifdef HAVE_QUERY_CACHE
Query_cache_tls query_cache_tls;
#endif
NET net; // client connection descriptor
Protocol *protocol; // Current protocol
Protocol_text protocol_text; // Normal protocol
Protocol_binary protocol_binary; // Binary protocol
HASH user_vars; // hash for user variables
String packet; // dynamic buffer for network I/O
String convert_buffer; // buffer for charset conversions
struct rand_struct rand; // used for authentication
struct system_variables variables; // Changeable local variables
struct system_status_var status_var; // Per thread statistic vars
struct system_status_var *initial_status_var; /* used by show status */
THR_LOCK_INFO lock_info; // Locking info of this thread
THR_LOCK_OWNER main_lock_id; // To use for conventional queries
THR_LOCK_OWNER *lock_id; // If not main_lock_id, points to
// the lock_id of a cursor.
/**
Protects THD data accessed from other threads:
    - thd->query and thd->query_length (used by SHOW ENGINE
      INNODB STATUS and SHOW PROCESSLIST)
- thd->mysys_var (used by KILL statement and shutdown).
Is locked when THD is deleted.
*/
mysql_mutex_t LOCK_thd_data;
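  /*
    Illustrative access pattern from another thread, per the comment above
    (a sketch only; 'other_thd' stands for the THD being inspected):

      mysql_mutex_lock(&other_thd->LOCK_thd_data);
      // safe to read other_thd->query and other_thd->query_length here
      mysql_mutex_unlock(&other_thd->LOCK_thd_data);
  */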
/* all prepared statements and cursors of this connection */
Statement_map stmt_map;
/*
A pointer to the stack frame of handle_one_connection(),
which is called first in the thread for handling a client
*/
char *thread_stack;
/**
Currently selected catalog.
*/
char *catalog;
/**
@note
    Some members of THD (currently 'Statement::db',
    'catalog' and 'query') are set and allocated by the slave SQL thread
    (for the THD of that thread); that thread is (and must remain, for now)
    the only one responsible for freeing these 3 members. If you add members
    here, and you add code to set them in replication, don't forget to
    free them and set them to 0 in replication properly. For details see
    the 'err:' label of handle_slave_sql() in sql/slave.cc.
@see handle_slave_sql
*/
Security_context main_security_ctx;
Security_context *security_ctx;
/*
    Points to the info-string that we show in SHOW PROCESSLIST.
    You are supposed to update thd->proc_info only if you have coded
    a time-consuming piece that MySQL can get stuck in for a long time.
Set it using the thd_proc_info(THD *thread, const char *message)
macro/function.
This member is accessed and assigned without any synchronization.
    Therefore, it may point only to constant (statically
    allocated) strings, whose memory won't go away over time.
*/
const char *proc_info;
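  /*
    Typical save/restore pattern with that macro (a sketch; it assumes, as
    common usage suggests, that thd_proc_info() returns the previous
    message; the stage string here is arbitrary):

      const char *old_msg= thd_proc_info(thd, "Copying to tmp table");
      ... // the time-consuming piece
      thd_proc_info(thd, old_msg);
  */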
/*
    Used in error messages to tell the user in what part of MySQL we found an
    error. E.g. when where= "having clause", if fix_fields() fails, the user
    will know that the error was in the having clause.
*/
const char *where;
ulong client_capabilities; /* What the client supports */
ulong max_client_packet_length;
HASH handler_tables_hash;
/*
One thread can hold up to one named user-level lock. This variable
    points to a lock object if the lock is present. See item_func.cc and
    the chapter 'Miscellaneous functions' for the functions GET_LOCK and
    RELEASE_LOCK.
*/
User_level_lock *ull;
#ifndef DBUG_OFF
uint dbug_sentry; // watch out for memory corruption
#endif
struct st_my_thread_var *mysys_var;
/*
Type of current query: COM_STMT_PREPARE, COM_QUERY, etc. Set from
first byte of the packet in do_command()
*/
enum enum_server_command command;
uint32 server_id;
uint32 file_id; // for LOAD DATA INFILE
/* remote (peer) port */
uint16 peer_port;
time_t start_time, user_time;
// track down slow pthread_create
ulonglong prior_thr_create_utime, thr_create_utime;
ulonglong start_utime, utime_after_lock;
thr_lock_type update_lock_default;
Delayed_insert *di;
  /* <> 0 if we are inside a trigger or a stored function. */
uint in_sub_stmt;
/* container for handler's private per-connection data */
Ha_data ha_data[MAX_HA];
#ifndef MYSQL_CLIENT
int binlog_setup_trx_data();
/*
Public interface to write RBR events to the binlog
*/
void binlog_start_trans_and_stmt();
void binlog_set_stmt_begin();
int binlog_write_table_map(TABLE *table, bool is_transactional);
int binlog_write_row(TABLE* table, bool is_transactional,
MY_BITMAP const* cols, size_t colcnt,
const uchar *buf);
int binlog_delete_row(TABLE* table, bool is_transactional,
MY_BITMAP const* cols, size_t colcnt,
const uchar *buf);
int binlog_update_row(TABLE* table, bool is_transactional,
MY_BITMAP const* cols, size_t colcnt,
const uchar *old_data, const uchar *new_data);
void set_server_id(uint32 sid) { server_id = sid; }
/*
Member functions to handle pending event for row-level logging.
*/
template <class RowsEventT> Rows_log_event*
binlog_prepare_pending_rows_event(TABLE* table, uint32 serv_id,
MY_BITMAP const* cols,
size_t colcnt,
size_t needed,
bool is_transactional,
RowsEventT* hint);
Rows_log_event* binlog_get_pending_rows_event(bool is_transactional) const;
void binlog_set_pending_rows_event(Rows_log_event* ev, bool is_transactional);
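  /*
    Convenience wrapper: flushes the pending row events of both the
    non-transactional (FALSE) and the transactional (TRUE) caches and
    reports failure if either flush fails.
  */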
inline int binlog_flush_pending_rows_event(bool stmt_end)
{
return (binlog_flush_pending_rows_event(stmt_end, FALSE) ||
binlog_flush_pending_rows_event(stmt_end, TRUE));
}
int binlog_flush_pending_rows_event(bool stmt_end, bool is_transactional);
int binlog_remove_pending_rows_event(bool clear_maps, bool is_transactional);
/**
Determine the binlog format of the current statement.
@retval 0 if the current statement will be logged in statement
format.
@retval nonzero if the current statement will be logged in row
format.
*/
int is_current_stmt_binlog_format_row() const {
DBUG_ASSERT(current_stmt_binlog_format == BINLOG_FORMAT_STMT ||
current_stmt_binlog_format == BINLOG_FORMAT_ROW);
return current_stmt_binlog_format == BINLOG_FORMAT_ROW;
}
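  /*
    A sketch of how this predicate is typically consumed by write paths
    (illustrative only; binlog_query() and the argument lists are
    stand-ins, not the exact caller code):

      if (thd->is_current_stmt_binlog_format_row())
        error= thd->binlog_write_row(table, is_trans, cols, colcnt, buf);
      else
        error= thd->binlog_query(...);   // statement format
  */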
enum enum_stmt_accessed_table
{
/*
If a transactional table is about to be read. Note that
a write implies a read.
*/
STMT_READS_TRANS_TABLE= 0,
/*
If a transactional table is about to be updated.
*/
STMT_WRITES_TRANS_TABLE,
/*
If a non-transactional table is about to be read. Note that
a write implies a read.
*/
STMT_READS_NON_TRANS_TABLE,
/*
If a non-transactional table is about to be updated.
*/
STMT_WRITES_NON_TRANS_TABLE,
/*
If a temporary transactional table is about to be read. Note
that a write implies a read.
*/
STMT_READS_TEMP_TRANS_TABLE,
/*
If a temporary transactional table is about to be updated.
*/
STMT_WRITES_TEMP_TRANS_TABLE,
/*
If a temporary non-transactional table is about to be read. Note
that a write implies a read.
*/
STMT_READS_TEMP_NON_TRANS_TABLE,
/*
If a temporary non-transactional table is about to be updated.
*/
STMT_WRITES_TEMP_NON_TRANS_TABLE,
/*
The last element of the enumeration. Please, if necessary add
anything before this.
*/
STMT_ACCESS_TABLE_COUNT
};
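  /*
    Each enumerator above names a bit position in stmt_accessed_table_flag.
    For example, a statement such as

      INSERT INTO innodb_tbl SELECT * FROM myisam_tbl;

    would set (1U << STMT_WRITES_TRANS_TABLE) for the InnoDB write and
    (1U << STMT_READS_NON_TRANS_TABLE) for the MyISAM read.
  */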
/**
Sets the type of table that is about to be accessed while executing a
statement.
@param accessed_table Enumeration type that defines the type of table,
e.g. temporary, transactional, non-transactional.
*/
inline void set_stmt_accessed_table(enum_stmt_accessed_table accessed_table)
{
DBUG_ENTER("THD::set_stmt_accessed_table");
DBUG_ASSERT(accessed_table >= 0 && accessed_table < STMT_ACCESS_TABLE_COUNT);
stmt_accessed_table_flag |= (1U << accessed_table);
DBUG_VOID_RETURN;
}
/**
Checks if a type of table is about to be accessed while executing a
statement.
@param accessed_table Enumeration type that defines the type of table,
e.g. temporary, transactional, non-transactional.
@return
@retval TRUE if the type of the table is about to be accessed
@retval FALSE otherwise
*/
inline bool stmt_accessed_table(enum_stmt_accessed_table accessed_table)
{
DBUG_ENTER("THD::stmt_accessed_table");
DBUG_ASSERT(accessed_table >= 0 && accessed_table < STMT_ACCESS_TABLE_COUNT);
DBUG_RETURN((stmt_accessed_table_flag & (1U << accessed_table)) != 0);
}
/**
Checks if a temporary table is about to be accessed while executing a
statement.
@return
@retval TRUE if a temporary table is about to be accessed
@retval FALSE otherwise
*/
inline bool stmt_accessed_temp_table()
{
DBUG_ENTER("THD::stmt_accessed_temp_table");
DBUG_RETURN((stmt_accessed_table_flag &
((1U << STMT_READS_TEMP_TRANS_TABLE) |
(1U << STMT_WRITES_TEMP_TRANS_TABLE) |
(1U << STMT_READS_TEMP_NON_TRANS_TABLE) |
(1U << STMT_WRITES_TEMP_NON_TRANS_TABLE))) != 0);
}
/**
Checks if a temporary non-transactional table is about to be accessed
while executing a statement.
@return
@retval TRUE if a temporary non-transactional table is about to be
accessed
@retval FALSE otherwise
*/
inline bool stmt_accessed_non_trans_temp_table()
{
DBUG_ENTER("THD::stmt_accessed_non_trans_temp_table");
DBUG_RETURN((stmt_accessed_table_flag &
((1U << STMT_READS_TEMP_NON_TRANS_TABLE) |
(1U << STMT_WRITES_TEMP_NON_TRANS_TABLE))) != 0);
}
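  /*
    A sketch of the kind of decision these predicates enable, e.g. when
    checking whether a rolled-back transaction must still reach the
    binary log (the surrounding logic here is illustrative, not the
    exact server code):

      if (thd->stmt_accessed_non_trans_temp_table() &&
          thd->variables.binlog_format == BINLOG_FORMAT_MIXED)
        ... // non-transactional temp-table changes must be binlogged
  */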
private:
/**
Indicates the format in which the current statement will be
logged. This can only be set from @c decide_logging_format().
*/
enum_binlog_format current_stmt_binlog_format;
/**
Bit field for the state of binlog warnings.
There are two groups of bits:
- The first Lex::BINLOG_STMT_UNSAFE_COUNT bits list all types of
unsafeness that the current statement has.
- The following Lex::BINLOG_STMT_UNSAFE_COUNT bits list all types
of unsafeness that the current statement has issued warnings
for.
Hence, this variable must be big enough to hold
2*Lex::BINLOG_STMT_UNSAFE_COUNT bits. This is asserted in @c
issue_unsafe_warnings().
    The first group of bits is set by @c decide_logging_format() when
    it detects that a warning should be issued. The second group of
    bits is set from @c binlog_query() when a warning is issued. All
    bits are cleared at the end of the
top-level statement.
This must be a member of THD and not of LEX, because warnings are
detected and issued in different places (@c
decide_logging_format() and @c binlog_query(), respectively).
Between these calls, the THD->lex object may change; e.g., if a
stored routine is invoked. Only THD persists between the calls.
*/
uint32 binlog_unsafe_warning_flags;
/**
Bit field that determines the type of tables that are about to
be accessed while executing a statement.
*/
uint32 stmt_accessed_table_flag;
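/*
Illustrative sketch only, not part of this class: with one bit per
table category, conflicting access patterns can be detected with
plain bitwise tests on stmt_accessed_table_flag. The flag names
below are hypothetical stand-ins for the real enumeration.

  enum { EX_WRITES_NON_TRANS_TABLE= 1U << 0,
         EX_READS_TRANS_TABLE=      1U << 1 };

  bool example_mixed_access(uint32 flags)
  {
    // writes a non-transactional table while reading a
    // transactional one: a candidate for an unsafety warning
    return (flags & EX_WRITES_NON_TRANS_TABLE) &&
           (flags & EX_READS_TRANS_TABLE);
  }
*/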
void issue_unsafe_warnings();
/*
Number of outstanding table maps, i.e., table maps in the
transaction cache.
*/
uint binlog_table_maps;
public:
uint get_binlog_table_maps() const {
return binlog_table_maps;
}
void clear_binlog_table_maps() {
binlog_table_maps= 0;
}
#endif /* MYSQL_CLIENT */
public:
struct st_transactions {
SAVEPOINT *savepoints;
THD_TRANS all; // Trans since BEGIN WORK
THD_TRANS stmt; // Trans for current statement
bool on; // see ha_enable_transaction()
XID_STATE xid_state;
Rows_log_event *m_pending_rows_event;
/*
Tables changed in transaction (that must be invalidated in query cache).
The list contains only transactional tables that have not yet been
invalidated in the query cache (rather than the full list of tables
changed in the transaction).
*/
CHANGED_TABLE_LIST* changed_tables;
MEM_ROOT mem_root; // Transaction-life memory allocation pool
void cleanup()
{
changed_tables= 0;
savepoints= 0;
/*
If rm_error is raised, it means that this piece of a distributed
transaction has failed and must be rolled back. But the user must
roll it back explicitly, so don't start a new distributed XA until
then.
*/
if (!xid_state.rm_error)
xid_state.xid.null();
free_root(&mem_root,MYF(MY_KEEP_PREALLOC));
}
st_transactions()
{
bzero((char*)this, sizeof(*this));
xid_state.xid.null();
init_sql_alloc(&mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
}
} transaction;
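/*
Usage sketch (illustrative, not a definitive call sequence): "stmt"
covers only the current statement, while "all" spans the whole
BEGIN..COMMIT unit; once the transaction is over, the cleanup()
method above releases the per-transaction state:

  void example_end_of_transaction(THD *thd)
  {
    // after the engines have committed/rolled back transaction.all:
    thd->transaction.cleanup();  // forgets savepoints and changed
                                 // tables, trims mem_root
  }
*/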
Global_read_lock global_read_lock;
Field *dup_field;
#ifndef __WIN__
sigset_t signals;
#endif
#ifdef SIGNAL_WITH_VIO_CLOSE
Vio* active_vio;
#endif
/*
This is to track items changed during execution of a prepared
statement/stored procedure. It's created by
register_item_tree_change() in memory root of THD, and freed in
rollback_item_tree_changes(). For conventional execution it's always
empty.
*/
Item_change_list change_list;
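/*
A minimal sketch of the intended pattern (assuming the usual
THD::change_item_tree() helper): a transformation that must not
survive re-execution of a PS/SP records the old pointer value here,
and rollback_item_tree_changes() restores it afterwards.

  void example_transform(THD *thd, Item **ref, Item *optimized)
  {
    thd->change_item_tree(ref, optimized);  // old *ref is remembered
                                            // in change_list
  }
*/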
/*
A permanent memory area of the statement. For conventional
execution, the parsed tree and execution runtime reside in the same
memory root. In this case stmt_arena points to THD. In case of
a prepared statement or a stored procedure statement, thd->mem_root
conventionally points to runtime memory, and thd->stmt_arena
points to the memory of the PS/SP, where the parsed tree of the
statement resides. Whenever you need to perform a permanent
transformation of a parsed tree, you should allocate new memory in
stmt_arena, to allow correct re-execution of PS/SP.
Note: in the parser, stmt_arena == thd, even for PS/SP.
*/
Query_arena *stmt_arena;
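/*
A hedged sketch of the common arena-switch pattern (helper names as
found in servers of this vintage; verify against this tree): a
permanent transformation temporarily makes stmt_arena's mem_root the
allocation root, then restores the runtime one.

  void example_permanent_transform(THD *thd)
  {
    Query_arena backup;
    Query_arena *arena= thd->activate_stmt_arena_if_needed(&backup);
    // ... allocate parsed-tree nodes: they persist across executions
    if (arena)
      thd->restore_active_arena(arena, &backup);
  }
*/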
/*
Map of tables that will be updated if this is a multi-table update
statement; for all other statements it is zero.
*/
table_map table_map_for_update;
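/*
Illustrative bit test (sketch): each table in a query occupies one
bit of a table_map, so checking whether a given table belongs to the
update set is a single AND:

  bool example_is_updated(THD *thd, TABLE *table)
  {
    return (thd->table_map_for_update & table->map) != 0;
  }
*/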
WL#3146 "less locking in auto_increment": this is a cleanup patch for our current auto_increment handling: new names for auto_increment variables in THD, new methods to manipulate them (see sql_class.h), some move into handler::, causing less backup/restore work when executing substatements. This makes the logic hopefully clearer, less work is is needed in mysql_insert(). By cleaning up, using different variables for different purposes (instead of one for 3 things...), we fix those bugs, which someone may want to fix in 5.0 too: BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate statement-based" BUG#20341 "stored function inserting into one auto_increment puts bad data in slave" BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE" (now if a row is updated, LAST_INSERT_ID() will return its id) and re-fixes: BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT" (already fixed differently by Ramil in 4.1) Test of documented behaviour of mysql_insert_id() (there was no test). The behaviour changes introduced are: - LAST_INSERT_ID() now returns "the first autogenerated auto_increment value successfully inserted", instead of "the first autogenerated auto_increment value if any row was successfully inserted", see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY UPDATE, see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() does not change if no autogenerated value was successfully inserted (it used to then be 0), see auto_increment.test. - if in INSERT SELECT no autogenerated value was successfully inserted, mysql_insert_id() now returns the id of the last inserted row (it already did this for INSERT VALUES), see mysql_client_test.c. - if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X (it already did this for INSERT VALUES), see mysql_client_test.c. - NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE, the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID influences not only the first row now. Additionally, when unlocking a table we check that the thread is not keeping a next_insert_id (as the table is unlocked that id is potentially out-of-date); forgetting about this next_insert_id is done in a new handler::ha_release_auto_increment(). Finally we prepare for engines capable of reserving finite-length intervals of auto_increment values: we store such intervals in THD. The next step (to be done by the replication team in 5.1) is to read those intervals from THD and actually store them in the statement-based binary log. NDB will be a good engine to test that.
2006-07-09 17:52:19 +02:00
/* Tells if LAST_INSERT_ID(#) was called for the current statement */
bool arg_of_last_insert_id_function;
/*
WL#3146 "less locking in auto_increment": this is a cleanup patch for our current auto_increment handling: new names for auto_increment variables in THD, new methods to manipulate them (see sql_class.h), some move into handler::, causing less backup/restore work when executing substatements. This makes the logic hopefully clearer, less work is is needed in mysql_insert(). By cleaning up, using different variables for different purposes (instead of one for 3 things...), we fix those bugs, which someone may want to fix in 5.0 too: BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate statement-based" BUG#20341 "stored function inserting into one auto_increment puts bad data in slave" BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE" (now if a row is updated, LAST_INSERT_ID() will return its id) and re-fixes: BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT" (already fixed differently by Ramil in 4.1) Test of documented behaviour of mysql_insert_id() (there was no test). The behaviour changes introduced are: - LAST_INSERT_ID() now returns "the first autogenerated auto_increment value successfully inserted", instead of "the first autogenerated auto_increment value if any row was successfully inserted", see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY UPDATE, see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() does not change if no autogenerated value was successfully inserted (it used to then be 0), see auto_increment.test. - if in INSERT SELECT no autogenerated value was successfully inserted, mysql_insert_id() now returns the id of the last inserted row (it already did this for INSERT VALUES), see mysql_client_test.c. - if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X (it already did this for INSERT VALUES), see mysql_client_test.c. - NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE, the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID influences not only the first row now. Additionally, when unlocking a table we check that the thread is not keeping a next_insert_id (as the table is unlocked that id is potentially out-of-date); forgetting about this next_insert_id is done in a new handler::ha_release_auto_increment(). Finally we prepare for engines capable of reserving finite-length intervals of auto_increment values: we store such intervals in THD. The next step (to be done by the replication team in 5.1) is to read those intervals from THD and actually store them in the statement-based binary log. NDB will be a good engine to test that.
2006-07-09 17:52:19 +02:00
ALL OVER THIS FILE, "insert_id" means "*automatically generated* value for
insertion into an auto_increment column".
*/
/*
WL#3146 "less locking in auto_increment": this is a cleanup patch for our current auto_increment handling: new names for auto_increment variables in THD, new methods to manipulate them (see sql_class.h), some move into handler::, causing less backup/restore work when executing substatements. This makes the logic hopefully clearer, less work is is needed in mysql_insert(). By cleaning up, using different variables for different purposes (instead of one for 3 things...), we fix those bugs, which someone may want to fix in 5.0 too: BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate statement-based" BUG#20341 "stored function inserting into one auto_increment puts bad data in slave" BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE" (now if a row is updated, LAST_INSERT_ID() will return its id) and re-fixes: BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT" (already fixed differently by Ramil in 4.1) Test of documented behaviour of mysql_insert_id() (there was no test). The behaviour changes introduced are: - LAST_INSERT_ID() now returns "the first autogenerated auto_increment value successfully inserted", instead of "the first autogenerated auto_increment value if any row was successfully inserted", see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY UPDATE, see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() does not change if no autogenerated value was successfully inserted (it used to then be 0), see auto_increment.test. - if in INSERT SELECT no autogenerated value was successfully inserted, mysql_insert_id() now returns the id of the last inserted row (it already did this for INSERT VALUES), see mysql_client_test.c. - if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X (it already did this for INSERT VALUES), see mysql_client_test.c. - NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE, the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID influences not only the first row now. Additionally, when unlocking a table we check that the thread is not keeping a next_insert_id (as the table is unlocked that id is potentially out-of-date); forgetting about this next_insert_id is done in a new handler::ha_release_auto_increment(). Finally we prepare for engines capable of reserving finite-length intervals of auto_increment values: we store such intervals in THD. The next step (to be done by the replication team in 5.1) is to read those intervals from THD and actually store them in the statement-based binary log. NDB will be a good engine to test that.
2006-07-09 17:52:19 +02:00
This is the first autogenerated insert id which was *successfully*
inserted by the previous statement (more precisely: if the previous
statement didn't successfully insert an autogenerated insert id, then
it's the one of the statement before that, and so on).
It can also be set by SET LAST_INSERT_ID=# or SELECT LAST_INSERT_ID(#).
It is returned by LAST_INSERT_ID().
*/
WL#3146 "less locking in auto_increment": this is a cleanup patch for our current auto_increment handling: new names for auto_increment variables in THD, new methods to manipulate them (see sql_class.h), some move into handler::, causing less backup/restore work when executing substatements. This makes the logic hopefully clearer, less work is is needed in mysql_insert(). By cleaning up, using different variables for different purposes (instead of one for 3 things...), we fix those bugs, which someone may want to fix in 5.0 too: BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate statement-based" BUG#20341 "stored function inserting into one auto_increment puts bad data in slave" BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE" (now if a row is updated, LAST_INSERT_ID() will return its id) and re-fixes: BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT" (already fixed differently by Ramil in 4.1) Test of documented behaviour of mysql_insert_id() (there was no test). The behaviour changes introduced are: - LAST_INSERT_ID() now returns "the first autogenerated auto_increment value successfully inserted", instead of "the first autogenerated auto_increment value if any row was successfully inserted", see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY UPDATE, see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() does not change if no autogenerated value was successfully inserted (it used to then be 0), see auto_increment.test. - if in INSERT SELECT no autogenerated value was successfully inserted, mysql_insert_id() now returns the id of the last inserted row (it already did this for INSERT VALUES), see mysql_client_test.c. - if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X (it already did this for INSERT VALUES), see mysql_client_test.c. - NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE, the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID influences not only the first row now. Additionally, when unlocking a table we check that the thread is not keeping a next_insert_id (as the table is unlocked that id is potentially out-of-date); forgetting about this next_insert_id is done in a new handler::ha_release_auto_increment(). Finally we prepare for engines capable of reserving finite-length intervals of auto_increment values: we store such intervals in THD. The next step (to be done by the replication team in 5.1) is to read those intervals from THD and actually store them in the statement-based binary log. NDB will be a good engine to test that.
2006-07-09 17:52:19 +02:00
ulonglong first_successful_insert_id_in_prev_stmt;
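/*
Example of the semantics above (table t with a single auto_increment
column "id"):

  INSERT INTO t VALUES (NULL);   -- autogenerates id 1
  SELECT LAST_INSERT_ID();       -- returns 1
  INSERT INTO t VALUES (10);     -- nothing autogenerated
  SELECT LAST_INSERT_ID();       -- still returns 1
*/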
/*
WL#3146 "less locking in auto_increment": this is a cleanup patch for our current auto_increment handling: new names for auto_increment variables in THD, new methods to manipulate them (see sql_class.h), some move into handler::, causing less backup/restore work when executing substatements. This makes the logic hopefully clearer, less work is is needed in mysql_insert(). By cleaning up, using different variables for different purposes (instead of one for 3 things...), we fix those bugs, which someone may want to fix in 5.0 too: BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate statement-based" BUG#20341 "stored function inserting into one auto_increment puts bad data in slave" BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE" (now if a row is updated, LAST_INSERT_ID() will return its id) and re-fixes: BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT" (already fixed differently by Ramil in 4.1) Test of documented behaviour of mysql_insert_id() (there was no test). The behaviour changes introduced are: - LAST_INSERT_ID() now returns "the first autogenerated auto_increment value successfully inserted", instead of "the first autogenerated auto_increment value if any row was successfully inserted", see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY UPDATE, see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() does not change if no autogenerated value was successfully inserted (it used to then be 0), see auto_increment.test. - if in INSERT SELECT no autogenerated value was successfully inserted, mysql_insert_id() now returns the id of the last inserted row (it already did this for INSERT VALUES), see mysql_client_test.c. - if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X (it already did this for INSERT VALUES), see mysql_client_test.c. - NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE, the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID influences not only the first row now. Additionally, when unlocking a table we check that the thread is not keeping a next_insert_id (as the table is unlocked that id is potentially out-of-date); forgetting about this next_insert_id is done in a new handler::ha_release_auto_increment(). Finally we prepare for engines capable of reserving finite-length intervals of auto_increment values: we store such intervals in THD. The next step (to be done by the replication team in 5.1) is to read those intervals from THD and actually store them in the statement-based binary log. NDB will be a good engine to test that.
2006-07-09 17:52:19 +02:00
Variant of the above, used for storing in statement-based binlog. The
difference is that the one above can change as the execution of a stored
function progresses, while the one below is set once and then does not
change (which is the value which statement-based binlog needs).
*/
WL#3146 "less locking in auto_increment": this is a cleanup patch for our current auto_increment handling: new names for auto_increment variables in THD, new methods to manipulate them (see sql_class.h), some move into handler::, causing less backup/restore work when executing substatements. This makes the logic hopefully clearer, less work is is needed in mysql_insert(). By cleaning up, using different variables for different purposes (instead of one for 3 things...), we fix those bugs, which someone may want to fix in 5.0 too: BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate statement-based" BUG#20341 "stored function inserting into one auto_increment puts bad data in slave" BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE" (now if a row is updated, LAST_INSERT_ID() will return its id) and re-fixes: BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT" (already fixed differently by Ramil in 4.1) Test of documented behaviour of mysql_insert_id() (there was no test). The behaviour changes introduced are: - LAST_INSERT_ID() now returns "the first autogenerated auto_increment value successfully inserted", instead of "the first autogenerated auto_increment value if any row was successfully inserted", see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY UPDATE, see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() does not change if no autogenerated value was successfully inserted (it used to then be 0), see auto_increment.test. - if in INSERT SELECT no autogenerated value was successfully inserted, mysql_insert_id() now returns the id of the last inserted row (it already did this for INSERT VALUES), see mysql_client_test.c. - if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X (it already did this for INSERT VALUES), see mysql_client_test.c. - NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE, the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID influences not only the first row now. Additionally, when unlocking a table we check that the thread is not keeping a next_insert_id (as the table is unlocked that id is potentially out-of-date); forgetting about this next_insert_id is done in a new handler::ha_release_auto_increment(). Finally we prepare for engines capable of reserving finite-length intervals of auto_increment values: we store such intervals in THD. The next step (to be done by the replication team in 5.1) is to read those intervals from THD and actually store them in the statement-based binary log. NDB will be a good engine to test that.
2006-07-09 17:52:19 +02:00
ulonglong first_successful_insert_id_in_prev_stmt_for_binlog;
/*
This is the first autogenerated insert id which was *successfully*
inserted by the current statement. It is maintained only to set
first_successful_insert_id_in_prev_stmt when statement ends.
*/
ulonglong first_successful_insert_id_in_cur_stmt;
/*
We follow this logic:
- when stmt starts, first_successful_insert_id_in_prev_stmt contains the
first insert id successfully inserted by the previous stmt.
- as stmt makes progress, handler::insert_id_for_cur_row changes;
every time get_auto_increment() is called,
auto_inc_intervals_in_cur_stmt_for_binlog is augmented with the
reserved interval (if statement-based binlogging).
WL#3146 "less locking in auto_increment": this is a cleanup patch for our current auto_increment handling: new names for auto_increment variables in THD, new methods to manipulate them (see sql_class.h), some move into handler::, causing less backup/restore work when executing substatements. This makes the logic hopefully clearer, less work is is needed in mysql_insert(). By cleaning up, using different variables for different purposes (instead of one for 3 things...), we fix those bugs, which someone may want to fix in 5.0 too: BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate statement-based" BUG#20341 "stored function inserting into one auto_increment puts bad data in slave" BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE" (now if a row is updated, LAST_INSERT_ID() will return its id) and re-fixes: BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT" (already fixed differently by Ramil in 4.1) Test of documented behaviour of mysql_insert_id() (there was no test). The behaviour changes introduced are: - LAST_INSERT_ID() now returns "the first autogenerated auto_increment value successfully inserted", instead of "the first autogenerated auto_increment value if any row was successfully inserted", see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY UPDATE, see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() does not change if no autogenerated value was successfully inserted (it used to then be 0), see auto_increment.test. - if in INSERT SELECT no autogenerated value was successfully inserted, mysql_insert_id() now returns the id of the last inserted row (it already did this for INSERT VALUES), see mysql_client_test.c. - if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X (it already did this for INSERT VALUES), see mysql_client_test.c. - NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE, the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID influences not only the first row now. Additionally, when unlocking a table we check that the thread is not keeping a next_insert_id (as the table is unlocked that id is potentially out-of-date); forgetting about this next_insert_id is done in a new handler::ha_release_auto_increment(). Finally we prepare for engines capable of reserving finite-length intervals of auto_increment values: we store such intervals in THD. The next step (to be done by the replication team in 5.1) is to read those intervals from THD and actually store them in the statement-based binary log. NDB will be a good engine to test that.
2006-07-09 17:52:19 +02:00
- at first successful insertion of an autogenerated value,
first_successful_insert_id_in_cur_stmt is set to
handler::insert_id_for_cur_row.
- when stmt goes to binlog,
auto_inc_intervals_in_cur_stmt_for_binlog is binlogged if
non-empty.
WL#3146 "less locking in auto_increment": this is a cleanup patch for our current auto_increment handling: new names for auto_increment variables in THD, new methods to manipulate them (see sql_class.h), some move into handler::, causing less backup/restore work when executing substatements. This makes the logic hopefully clearer, less work is is needed in mysql_insert(). By cleaning up, using different variables for different purposes (instead of one for 3 things...), we fix those bugs, which someone may want to fix in 5.0 too: BUG#20339 "stored procedure using LAST_INSERT_ID() does not replicate statement-based" BUG#20341 "stored function inserting into one auto_increment puts bad data in slave" BUG#19243 "wrong LAST_INSERT_ID() after ON DUPLICATE KEY UPDATE" (now if a row is updated, LAST_INSERT_ID() will return its id) and re-fixes: BUG#6880 "LAST_INSERT_ID() value changes during multi-row INSERT" (already fixed differently by Ramil in 4.1) Test of documented behaviour of mysql_insert_id() (there was no test). The behaviour changes introduced are: - LAST_INSERT_ID() now returns "the first autogenerated auto_increment value successfully inserted", instead of "the first autogenerated auto_increment value if any row was successfully inserted", see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() returns the id of the updated row if ON DUPLICATE KEY UPDATE, see auto_increment.test. Same for mysql_insert_id(), see mysql_client_test.c. - LAST_INSERT_ID() does not change if no autogenerated value was successfully inserted (it used to then be 0), see auto_increment.test. - if in INSERT SELECT no autogenerated value was successfully inserted, mysql_insert_id() now returns the id of the last inserted row (it already did this for INSERT VALUES), see mysql_client_test.c. - if INSERT SELECT uses LAST_INSERT_ID(X), mysql_insert_id() now returns X (it already did this for INSERT VALUES), see mysql_client_test.c. - NDB now behaves like other engines wrt SET INSERT_ID: with INSERT IGNORE, the id passed in SET INSERT_ID is re-used until a row succeeds; SET INSERT_ID influences not only the first row now. Additionally, when unlocking a table we check that the thread is not keeping a next_insert_id (as the table is unlocked that id is potentially out-of-date); forgetting about this next_insert_id is done in a new handler::ha_release_auto_increment(). Finally we prepare for engines capable of reserving finite-length intervals of auto_increment values: we store such intervals in THD. The next step (to be done by the replication team in 5.1) is to read those intervals from THD and actually store them in the statement-based binary log. NDB will be a good engine to test that.
2006-07-09 17:52:19 +02:00
- when stmt ends, first_successful_insert_id_in_prev_stmt is set to
first_successful_insert_id_in_cur_stmt.
*/
/*
stmt_depends_on_first_successful_insert_id_in_prev_stmt is set when
LAST_INSERT_ID() is used by a statement.
If it is set, first_successful_insert_id_in_prev_stmt_for_binlog will be
stored in the statement-based binlog.
This variable is CUMULATIVE along the execution of a stored function or
trigger: if one substatement sets it to 1 it will stay 1 until the
function/trigger ends, thus making sure that
first_successful_insert_id_in_prev_stmt_for_binlog does not change anymore
and is propagated to the caller for binlogging.
*/
bool stmt_depends_on_first_successful_insert_id_in_prev_stmt;
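/*
Illustrative sketch (not from the original source; table, column and
trigger names are hypothetical) of how the flag above is cumulative:
  CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW
    SET NEW.b= LAST_INSERT_ID();
  INSERT INTO t1 (a) VALUES (1);
The trigger's substatement reads LAST_INSERT_ID(), setting the flag to
1; it stays 1 until the top statement ends, so
first_successful_insert_id_in_prev_stmt_for_binlog stays frozen and can
be binlogged for the whole top statement.
*/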
/*
List of auto_increment intervals reserved by the thread so far, for
storage in the statement-based binlog.
Note that its minimum is not first_successful_insert_id_in_cur_stmt:
assuming a table with an autoinc column, if this happens:
INSERT INTO ... VALUES(3);
SET INSERT_ID=3; INSERT IGNORE ... VALUES (NULL);
then the latter INSERT will insert no rows
(first_successful_insert_id_in_cur_stmt == 0), but storing "INSERT_ID=3"
in the binlog is still needed; the list's minimum will be 3.
This variable is cumulative: if several statements are written to binlog
as one (stored functions or triggers are used) this list is the
concatenation of all intervals reserved by all statements.
*/
Discrete_intervals_list auto_inc_intervals_in_cur_stmt_for_binlog;
/* Used by replication and SET INSERT_ID */
Discrete_intervals_list auto_inc_intervals_forced;
/*
There is BUG#19630 where statement-based replication of stored
functions/triggers with two auto_increment columns breaks.
We however ensure that it works when there is 0 or 1 auto_increment
column; our rules are
a) on master, while executing a top statement involving substatements,
first top- or sub- statement to generate auto_increment values wins the
exclusive right to see its values be written to binlog (the write
will be done by the statement or its caller), and the losers won't see
their values be written to binlog.
b) on the slave, while replicating a top statement involving
substatements, the first top- or sub-statement that needs to read
auto_increment values from the master's binlog wins the exclusive right
to read them (the losers won't read their values from the binlog but
will instead generate them on their own).
a) implies that we mustn't backup/restore
auto_inc_intervals_in_cur_stmt_for_binlog.
b) implies that we mustn't backup/restore auto_inc_intervals_forced.
If there is more than one auto_increment column, then intervals for
different columns may mix into the
auto_inc_intervals_in_cur_stmt_for_binlog list, which is logically
wrong, but there is no point in preventing this mixing by keeping
intervals of the second inserted column out of the list, as such
prevention would be wrong too.
What will happen in the case of
INSERT INTO t1 (auto_inc) VALUES(NULL);
where t1 has a trigger which inserts into an auto_inc column of t2, is
that in the binlog we'll store the interval of t1 and the interval of
t2 (when we store intervals, soon); then on the slave, t1 will use both
intervals and t2 will use none. If t1 inserts the same number of rows
as on the master, normally the 2nd interval will not be used by t1,
which is fine. t2's values will be wrong if t2's internal
auto_increment counter is different from what it was on the master
(which is likely). In 5.1, in mixed binlogging mode, row-based
binlogging is used for such cases where two auto_increment columns are
inserted.
*/
inline void record_first_successful_insert_id_in_cur_stmt(ulonglong id_arg)
{
if (first_successful_insert_id_in_cur_stmt == 0)
first_successful_insert_id_in_cur_stmt= id_arg;
}
inline ulonglong read_first_successful_insert_id_in_prev_stmt(void)
{
if (!stmt_depends_on_first_successful_insert_id_in_prev_stmt)
{
/* It's the first time we read it */
first_successful_insert_id_in_prev_stmt_for_binlog=
first_successful_insert_id_in_prev_stmt;
stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1;
}
return first_successful_insert_id_in_prev_stmt;
}
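/*
Sketch of the observable behaviour of the method above (assumed values,
for illustration only):
  INSERT INTO t (id) VALUES (NULL);  -- previous stmt, autogenerates 42
  SELECT LAST_INSERT_ID();           -- calls this method: returns 42 and
                                     -- latches 42 into
                                     -- first_successful_insert_id_in_prev_stmt_for_binlog
The latch guarantees that the binlogged value is the one the statement
actually read, even if the method is called again later.
*/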
/*
Used by Intvar_log_event::do_apply_event() and by "SET INSERT_ID=#"
(mysqlbinlog). We'll soon add a variant which can take many intervals
as arguments.
*/
inline void force_one_auto_inc_interval(ulonglong next_id)
{
auto_inc_intervals_forced.empty(); // in case of multiple SET INSERT_ID
auto_inc_intervals_forced.append(next_id, ULONGLONG_MAX, 0);
}
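/*
Example (sketch): the statement "SET INSERT_ID=5" ends up calling
force_one_auto_inc_interval(5): the forced list is emptied, then a
single interval starting at 5 (and effectively unbounded, see the
ULONGLONG_MAX argument above) is appended, so the next insertion
generates its first auto_increment value as 5.
*/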
ulonglong limit_found_rows;
private:
/**
Stores the result of the ROW_COUNT() function.
The ROW_COUNT() function is a MySQL extension, but we try to keep it
similar to the ROW_COUNT member of the GET DIAGNOSTICS stack of the SQL
standard (see SQL99, part 2, search for ROW_COUNT). Its value is
implementation-defined for anything except INSERT, DELETE, UPDATE.
ROW_COUNT is assigned according to the following rules:
- In my_ok():
- for DML statements: to the number of affected rows;
- for DDL statements: to 0.
- In my_eof(): to -1 to indicate that there was a result set.
We derive this semantics from the JDBC specification, where int
java.sql.Statement.getUpdateCount() is defined to (sic) "return the
current result as an update count; if the result is a ResultSet
object or there are no more results, -1 is returned".
- In my_error(): to -1 to be compatible with the MySQL C API and
MySQL ODBC driver.
- For SIGNAL statements: to 0 per WL#2110 specification (see also
sql_signal.cc comment). Zero is used since that's the "default"
value of ROW_COUNT in the diagnostics area.
*/
longlong m_row_count_func; /* For the ROW_COUNT() function */
public:
inline longlong get_row_count_func() const
{
return m_row_count_func;
}
inline void set_row_count_func(longlong row_count_func)
{
m_row_count_func= row_count_func;
}
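/*
Illustration of the assignment rules documented above (a sketch; table
names are hypothetical and t is assumed to have two rows):
  UPDATE t SET a=a+1;      -- my_ok(): ROW_COUNT() = 2 (DML)
  CREATE TABLE u (a INT);  -- my_ok(): ROW_COUNT() = 0 (DDL)
  SELECT * FROM t;         -- my_eof(): ROW_COUNT() = -1 (result set)
  SELECT * FROM missing;   -- my_error(): ROW_COUNT() = -1
*/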
ha_rows cuted_fields;
/*
number of rows we actually sent to the client, including "synthetic"
rows in ROLLUP etc.
*/
ha_rows sent_row_count;
/**
Number of rows read and/or evaluated for a statement. Used for
slow log reporting.
An examined row is defined as a row that is read and/or evaluated
according to a statement condition, including in
create_sort_index(). Rows may be counted more than once, e.g., a
statement including ORDER BY could possibly evaluate the row in
filesort() before reading it for, e.g., an update.
*/
ha_rows examined_row_count;
/*
The set of those tables whose fields are referenced in all subqueries
of the query.
TODO: it is possibly incorrect to keep used tables in THD because,
with more than one subquery, it is not clear what the field means.
*/
table_map used_tables;
USER_CONN *user_connect;
CHARSET_INFO *db_charset;
Warning_info *warning_info;
Diagnostics_area *stmt_da;
#if defined(ENABLED_PROFILING)
PROFILING profiling;
#endif
/*
Id of the current query. A statement can be reused to execute several
queries; query_id is global in the context of the whole MySQL server.
The ID is automatically generated from a mutex-protected counter.
It's used in handler code for various purposes: to check which columns
of a table are necessary for this select, and to check whether it's
necessary to update auto-updatable fields (like auto_increment and
timestamp).
*/
query_id_t query_id;
ulong col_access;
/* Statement id is thread-wide. This counter is used to generate ids */
ulong statement_id_counter;
ulong rand_saved_seed1, rand_saved_seed2;
pthread_t real_id; /* For debugging */
my_thread_id thread_id;
uint tmp_table;
uint server_status,open_options;
enum enum_thread_type system_thread;
uint select_number; //number of select (used for EXPLAIN)
/*
Current or next transaction isolation level.
When a connection is established, the value is taken from
@@session.tx_isolation (default transaction isolation for
the session), which is in turn taken from @@global.tx_isolation
(the global value).
If there is no transaction started, this variable
holds the value of the next transaction's isolation level.
When a transaction starts, the value stored in this variable
becomes "actual".
At transaction commit or rollback, we assign this variable
again from @@session.tx_isolation.
The only statement that can otherwise change the value
of this variable is SET TRANSACTION ISOLATION LEVEL.
Its purpose is to set the isolation level of the next
transaction in this session. When this statement is executed,
the value in this variable is changed. However, since
this statement is only allowed when there is no active
transaction, this assignment (naturally) only affects the
upcoming transaction.
At the end of the current active transaction the value is
reset again from @@session.tx_isolation, as described
above.
*/
enum_tx_isolation tx_isolation;
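/*
Lifecycle sketch for tx_isolation (follows the comment above; the SQL
is illustrative):
  SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
                        -- changes @@session.tx_isolation itself
  SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
                        -- changes only tx_isolation (next transaction)
  START TRANSACTION;    -- SERIALIZABLE becomes "actual"
  COMMIT;               -- tx_isolation is reset from
                        -- @@session.tx_isolation, i.e. READ COMMITTED
*/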
enum_check_fields count_cuted_fields;
DYNAMIC_ARRAY user_var_events; /* For user variables replication */
MEM_ROOT *user_var_events_alloc; /* Allocate above array elements here */
enum killed_state
{
NOT_KILLED=0,
KILL_BAD_DATA=1,
KILL_CONNECTION=ER_SERVER_SHUTDOWN,
KILL_QUERY=ER_QUERY_INTERRUPTED,
KILLED_NO_VALUE /* means neither of the states */
};
killed_state volatile killed;
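/*
Sketch of how the states above are typically reached (a summary, not a
specification):
  KILL QUERY <id>;       -- peer thread calls awake(KILL_QUERY); only
                         -- the current statement is aborted
  KILL CONNECTION <id>;  -- peer thread calls awake(KILL_CONNECTION);
                         -- the whole connection is terminated
KILL_BAD_DATA is set internally when a statement must be aborted
because of bad data (e.g. in strict mode).
*/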
/* scramble - random string sent to client on handshake */
char scramble[SCRAMBLE_LENGTH+1];
bool slave_thread, one_shot_set;
bool no_errors, password;
/**
Set to TRUE if execution of the current compound statement
can not continue. In particular, disables activation of
CONTINUE or EXIT handlers of stored routines.
Reset at the end of processing of the current user request, in
@see mysql_reset_thd_for_next_command().
*/
bool is_fatal_error;
/**
Set by a storage engine to request the entire
transaction (that possibly spans multiple engines) to
rollback. Reset in ha_rollback.
*/
bool transaction_rollback_request;
/**
TRUE if we are in a sub-statement and the current error cannot
be safely recovered from until we leave sub-statement mode.
In particular, disables activation of CONTINUE and EXIT
handlers inside sub-statements. E.g. if it is a deadlock
error and requires a transaction-wide rollback, this flag is
raised (traditionally, MySQL first has to close all the reads
via @see handler::ha_index_or_rnd_end() and only then perform
the rollback).
Reset to FALSE when we leave the sub-statement mode.
*/
bool is_fatal_sub_stmt_error;
bool query_start_used, rand_used, time_zone_used;
/* for IS NULL => = last_insert_id() fix in remove_eq_conds() */
bool substitute_null_with_insert_id;
bool in_lock_tables;
/**
True if a slave error occurred; this causes the slave to stop. Not the
same as the statement execution error (is_error()), since
a statement may be expected to return an error, e.g. because
it returned an error on master, and this is OK on the slave.
*/
bool is_slave_error;
bool bootstrap, cleanup_done;
/** Is set if some thread-specific value(s) are used in a statement. */
bool thread_specific_used;
/**
is set if a statement accesses a temporary table created through
CREATE TEMPORARY TABLE.
*/
bool charset_is_system_charset, charset_is_collation_connection;
bool charset_is_character_set_filesystem;
bool enable_slow_log; /* enable slow log for current statement */
bool abort_on_warning;
bool got_warning; /* Set on call to push_warning() */
bool no_warnings_for_error; /* no warnings on call to my_error() */
/* set during loop of derived table processing */
bool derived_tables_processing;
my_bool tablespace_op; /* This is TRUE in DISCARD/IMPORT TABLESPACE */
sp_rcontext *spcont; // SP runtime context
sp_cache *sp_proc_cache;
sp_cache *sp_func_cache;
/** number of name_const() substitutions, see sp_head.cc:subst_spvars() */
uint query_name_consts;
/*
If we do a purge of binary logs, the log index info of the threads
that are currently reading it needs to be adjusted. To do that,
each thread that is using a LOG_INFO needs to adjust its pointer to it.
*/
LOG_INFO* current_linfo;
NET* slave_net; // network connection from slave -> master
/* Used by the sys_var class to store temporary values */
union
{
my_bool my_bool_value;
long long_value;
ulong ulong_value;
ulonglong ulonglong_value;
} sys_var_tmp;
struct {
/*
If true, a mysql_bin_log::write(Log_event) call will not write events
to the binlog, but will maintain the two variables below instead (use
mysql_bin_log.start_union_events() to turn this on)
*/
bool do_union;
/*
If TRUE, at least one mysql_bin_log::write(Log_event) call has been
made after last mysql_bin_log.start_union_events() call.
*/
bool unioned_events;
/*
If TRUE, at least one mysql_bin_log::write(Log_event e), where
e.cache_stmt == TRUE call has been made after last
mysql_bin_log.start_union_events() call.
*/
bool unioned_events_trans;
/*
'queries' (actually SP statements) that run inside this binlog
union have thd->query_id >= first_query_id.
*/
query_id_t first_query_id;
} binlog_evt_union;
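/*
Usage sketch for binlog_evt_union, modelled on how stored programs
drive it (simplified; error handling omitted):
  mysql_bin_log.start_union_events(thd, thd->query_id);
  ... execute the SP statements ...
  mysql_bin_log.stop_union_events(thd);
  if (thd->binlog_evt_union.unioned_events)
    ... write one combined event; treat it as transactional if
        unioned_events_trans is set ...
*/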
/**
Internal parser state.
Note that since the parser is not re-entrant, we keep only one parser
state here. This member is valid only when executing code during parsing.
*/
Parser_state *m_parser_state;
Locked_tables_list locked_tables_list;
#ifdef WITH_PARTITION_STORAGE_ENGINE
partition_info *work_part_info;
#endif
#ifndef EMBEDDED_LIBRARY
/**
Array of active audit plugins which have been used by this THD.
This list is later iterated to invoke release_thd() on those
plugins.
*/
DYNAMIC_ARRAY audit_class_plugins;
/**
Array of bits indicating which audit classes have already been
added to the list of audit plugins which are currently in use.
*/
unsigned long audit_class_mask[MYSQL_AUDIT_CLASS_MASK_SIZE];
#endif
#if defined(ENABLED_DEBUG_SYNC)
/* Debug Sync facility. See debug_sync.cc. */
struct st_debug_sync_control *debug_sync_control;
#endif /* defined(ENABLED_DEBUG_SYNC) */
THD();
~THD();
void init(void);
/*
Initialize memory roots necessary for query processing and (!)
pre-allocate memory for it. We can't do that in the THD constructor
because there are use cases (acl_init, delayed inserts, watcher
threads, killing mysqld) where it's vital not to allocate excessive,
unused memory. Note that we still don't return an error from
init_for_queries(): if preallocation fails, we will notice it at the
first call to alloc_root.
*/
void init_for_queries();
void change_user(void);
void cleanup(void);
void cleanup_after_query();
bool store_globals();
#ifdef SIGNAL_WITH_VIO_CLOSE
inline void set_active_vio(Vio* vio)
{
mysql_mutex_lock(&LOCK_thd_data);
active_vio = vio;
mysql_mutex_unlock(&LOCK_thd_data);
}
inline void clear_active_vio()
{
mysql_mutex_lock(&LOCK_thd_data);
active_vio = 0;
mysql_mutex_unlock(&LOCK_thd_data);
}
void close_active_vio();
#endif
void awake(THD::killed_state state_to_set);
#ifndef MYSQL_CLIENT
enum enum_binlog_query_type {
/* The query can be logged in row format or in statement format. */
ROW_QUERY_TYPE,
/* The query has to be logged in statement format. */
STMT_QUERY_TYPE,
QUERY_TYPE_COUNT
};
int binlog_query(enum_binlog_query_type qtype,
char const *query, ulong query_len, bool is_trans,
bool direct, bool suppress_use,
int errcode);
#endif
/*
For enter_cond() / exit_cond() to work, the mutex must be acquired
before enter_cond(); this mutex is then released by exit_cond().
Usage must be: lock mutex; enter_cond(); your code; exit_cond().
*/
inline const char* enter_cond(mysql_cond_t *cond, mysql_mutex_t* mutex,
const char* msg)
{
const char* old_msg = proc_info;
mysql_mutex_assert_owner(mutex);
mysys_var->current_mutex = mutex;
mysys_var->current_cond = cond;
proc_info = msg;
return old_msg;
}
inline void exit_cond(const char* old_msg)
{
/*
Putting the mutex unlock in thd->exit_cond() ensures that
mysys_var->current_mutex is always unlocked _before_ mysys_var->mutex is
locked (if that would not be the case, you'll get a deadlock if someone
does a THD::awake() on you).
*/
mysql_mutex_unlock(mysys_var->current_mutex);
mysql_mutex_lock(&mysys_var->mutex);
mysys_var->current_mutex = 0;
mysys_var->current_cond = 0;
proc_info = old_msg;
mysql_mutex_unlock(&mysys_var->mutex);
return;
}
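/*
Canonical wait-loop sketch using enter_cond()/exit_cond() (condition,
mutex and predicate names are placeholders):
  mysql_mutex_lock(&LOCK_x);                 // must hold the mutex first
  const char *old_msg= thd->enter_cond(&COND_x, &LOCK_x, "Waiting for x");
  while (!x_ready && !thd->killed)
    mysql_cond_wait(&COND_x, &LOCK_x);
  thd->exit_cond(old_msg);                   // also unlocks LOCK_x
*/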
inline time_t query_start() { query_start_used=1; return start_time; }
inline void set_time()
{
if (user_time)
{
start_time= user_time;
start_utime= utime_after_lock= my_micro_time();
}
else
start_utime= utime_after_lock= my_micro_time_and_time(&start_time);
}
inline void set_current_time() { start_time= my_time(MY_WME); }
inline void set_time(time_t t)
{
start_time= user_time= t;
start_utime= utime_after_lock= my_micro_time();
}
/* TODO: this will be obsolete when we have support for 64-bit my_time_t */
inline bool is_valid_time()
{
return (start_time < (time_t) MY_TIME_T_MAX);
}
void set_time_after_lock() { utime_after_lock= my_micro_time(); }
ulonglong current_utime() { return my_micro_time(); }
inline ulonglong found_rows(void)
{
return limit_found_rows;
}
/**
Returns TRUE if session is in a multi-statement transaction mode.
OPTION_NOT_AUTOCOMMIT: When autocommit is off, a multi-statement
transaction is implicitly started on the first statement after a
previous transaction has been ended.
OPTION_BEGIN: Regardless of the autocommit status, a multi-statement
transaction can be explicitly started with the statements "START
TRANSACTION", "BEGIN [WORK]", "[COMMIT | ROLLBACK] AND CHAIN", etc.
Note: this doesn't tell you whether a transaction is active.
A session can be in multi-statement transaction mode, and yet
have no active transaction, e.g., in case of:
set @@autocommit=0;
set @a= 3; <-- these statements don't
set transaction isolation level serializable; <-- start an active
flush tables; <-- transaction
I.e. for the above scenario this function returns TRUE, even
though no active transaction has begun.
@sa in_active_multi_stmt_transaction()
*/
inline bool in_multi_stmt_transaction_mode()
{
return variables.option_bits & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN);
}
/**
TRUE if the session is in a multi-statement transaction mode
(@sa in_multi_stmt_transaction_mode()) *and* there is an
active transaction, i.e. there is an explicit start of a
transaction with BEGIN statement, or implicit with a
statement that uses a transactional engine.
For example, these scenarios don't start an active transaction
(even though the server is in multi-statement transaction mode):
set @@autocommit=0;
select * from nontrans_table;
set @var=TRUE;
flush tables;
Note that even for a statement that starts a multi-statement
transaction (e.g. select * from trans_table), this
flag won't be set until we open the statement's tables
and the engines register themselves for the transaction
(see trans_register_ha()); hence this method is reliable
only after open_tables() has completed.
Why do we need a flag?
----------------------
We need to maintain a (at first glance redundant)
session flag, rather than looking at thd->transaction.all.ha_list
because of explicit start of a transaction with BEGIN.
I.e. in case of
BEGIN;
select * from nontrans_t1; <-- in_active_multi_stmt_transaction() is true
*/
inline bool in_active_multi_stmt_transaction()
{
return server_status & SERVER_STATUS_IN_TRANS;
}
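/*
Contrast sketch of the two predicates above (table name hypothetical):
  SET autocommit=0;          -- in_multi_stmt_transaction_mode(): TRUE
  SELECT * FROM innodb_tbl;  -- once open_tables()/trans_register_ha()
                             -- have run,
                             -- in_active_multi_stmt_transaction(): TRUE
  COMMIT;                    -- the active transaction ends; the mode
                             -- stays TRUE while autocommit is still 0
*/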
inline bool fill_derived_tables()
{
return !stmt_arena->is_stmt_prepare() && !lex->only_view_structure();
}
inline bool fill_information_schema_tables()
{
return !stmt_arena->is_stmt_prepare();
}
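  /*
    Allocate from the per-transaction memroot. As a sketch of the intent:
    memory allocated here lives until the transaction ends and needs no
    explicit freeing by the caller.
  */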
inline void* trans_alloc(unsigned int size)
{
    return alloc_root(&transaction.mem_root, size);
}
LEX_STRING *make_lex_string(LEX_STRING *lex_str,
const char* str, uint length,
bool allocate_lex_string);
bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
const char *from, uint from_length,
CHARSET_INFO *from_cs);
bool convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs);
void add_changed_table(TABLE *table);
void add_changed_table(const char *key, long key_length);
CHANGED_TABLE_LIST * changed_table_dup(const char *key, long key_length);
int send_explain_fields(select_result *result);
/**
Clear the current error, if any.
We do not clear is_fatal_error or is_fatal_sub_stmt_error since we
    assume this is never called if a fatal error is set.
    @todo: To silence an error, one should use the Internal_error_handler
    mechanism. In the future, this function will be removed.
*/
inline void clear_error()
{
DBUG_ENTER("clear_error");
if (stmt_da->is_error())
stmt_da->reset_diagnostics_area();
is_slave_error= 0;
DBUG_VOID_RETURN;
}
#ifndef EMBEDDED_LIBRARY
inline bool vio_ok() const { return net.vio != 0; }
/** Return FALSE if connection to client is broken. */
bool is_connected()
{
/*
All system threads (e.g., the slave IO thread) are connected but
not using vio. So this function always returns true for all
system threads.
*/
return system_thread || (vio_ok() ? vio_is_connected(net.vio) : FALSE);
}
#else
inline bool vio_ok() const { return TRUE; }
inline bool is_connected() { return TRUE; }
#endif
/**
    Mark the current error as fatal. Warning: this does not
    set any error itself; it only sets a property of the existing error,
    so it must be preceded or followed by a call to my_error().
*/
inline void fatal_error()
{
DBUG_ASSERT(stmt_da->is_error() || killed);
is_fatal_error= 1;
DBUG_PRINT("error",("Fatal error set"));
}
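  /*
    A typical calling sequence for fatal_error() (a sketch; the error code
    is just an example):

      my_error(ER_OUT_OF_RESOURCES, MYF(0));
      thd->fatal_error();
  */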
/**
TRUE if there is an error in the error stack.
Please use this method instead of direct access to
net.report_error.
If TRUE, the current (sub)-statement should be aborted.
    The main difference between this flag and is_fatal_error
    is that a fatal error cannot be handled by a stored
    procedure continue handler, whereas a normal error can.
To raise this flag, use my_error().
*/
inline bool is_error() const { return stmt_da->is_error(); }
inline CHARSET_INFO *charset() { return variables.character_set_client; }
void update_charset();
inline Query_arena *activate_stmt_arena_if_needed(Query_arena *backup)
{
/*
Use the persistent arena if we are in a prepared statement or a stored
procedure statement and we have not already changed to use this arena.
*/
if (!stmt_arena->is_conventional() && mem_root != stmt_arena->mem_root)
{
set_n_backup_active_arena(stmt_arena, backup);
return stmt_arena;
}
return 0;
}
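  /*
    Typical calling pattern (a sketch):

      Query_arena backup, *arena= thd->activate_stmt_arena_if_needed(&backup);
      ... allocate structures that must live as long as the statement ...
      if (arena)
        thd->restore_active_arena(arena, &backup);
  */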
void change_item_tree(Item **place, Item *new_value)
{
/* TODO: check for OOM condition here */
if (!stmt_arena->is_conventional())
nocheck_register_item_tree_change(place, *place, mem_root);
*place= new_value;
}
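  /*
    Example (a sketch; 'item' and 'new_item' are illustrative): substitute a
    subtree so that the change can be undone when the statement ends:

      thd->change_item_tree(&item->args[0], new_item);

    rollback_item_tree_changes(), declared below, restores the original
    pointers.
  */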
void nocheck_register_item_tree_change(Item **place, Item *old_value,
MEM_ROOT *runtime_memroot);
void rollback_item_tree_changes();
/*
    Clean up the statement parse state (parse tree, lex) and execution
    state after execution of a non-prepared SQL statement.
*/
void end_statement();
inline int killed_errno() const
{
killed_state killed_val; /* to cache the volatile 'killed' */
return (killed_val= killed) != KILL_BAD_DATA ? killed_val : 0;
}
inline void send_kill_message() const
{
int err= killed_errno();
if (err)
{
if ((err == KILL_CONNECTION) && !shutdown_in_progress)
err = KILL_QUERY;
my_message(err, ER(err), MYF(0));
}
}
  /* Return TRUE if the query will be aborted when a warning is raised now. */
inline bool really_abort_on_warning()
{
return (abort_on_warning &&
(!transaction.stmt.modified_non_trans_table ||
(variables.sql_mode & MODE_STRICT_ALL_TABLES)));
}
void set_status_var_init();
bool is_context_analysis_only()
{ return stmt_arena->is_stmt_prepare() || lex->view_prepare_mode; }
void reset_n_backup_open_tables_state(Open_tables_backup *backup);
void restore_backup_open_tables_state(Open_tables_backup *backup);
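  /*
    The reset/restore methods here are used in pairs, bracket style
    (a sketch for the open-tables pair):

      Open_tables_backup backup;
      thd->reset_n_backup_open_tables_state(&backup);
      ... work with a private set of open tables, e.g. system tables ...
      thd->restore_backup_open_tables_state(&backup);
  */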
void reset_sub_statement_state(Sub_statement_state *backup, uint new_state);
void restore_sub_statement_state(Sub_statement_state *backup);
void set_n_backup_active_arena(Query_arena *set, Query_arena *backup);
void restore_active_arena(Query_arena *set, Query_arena *backup);
/*
@todo Make these methods private or remove them completely. Only
decide_logging_format should call them. /Sven
*/
inline void set_current_stmt_binlog_format_row_if_mixed()
{
DBUG_ENTER("set_current_stmt_binlog_format_row_if_mixed");
/*
This should only be called from decide_logging_format.
@todo Once we have ensured this, uncomment the following
statement, remove the big comment below that, and remove the
in_sub_stmt==0 condition from the following 'if'.
*/
/* DBUG_ASSERT(in_sub_stmt == 0); */
/*
      If in a stored function/trigger, the caller should already have done the
change. We test in_sub_stmt to prevent introducing bugs where people
wouldn't ensure that, and would switch to row-based mode in the middle
of executing a stored function/trigger (which is too late, see also
reset_current_stmt_binlog_format_row()); this condition will make their
tests fail and so force them to propagate the
lex->binlog_row_based_if_mixed upwards to the caller.
*/
if ((variables.binlog_format == BINLOG_FORMAT_MIXED) &&
(in_sub_stmt == 0))
set_current_stmt_binlog_format_row();
DBUG_VOID_RETURN;
}
inline void set_current_stmt_binlog_format_row()
{
DBUG_ENTER("set_current_stmt_binlog_format_row");
current_stmt_binlog_format= BINLOG_FORMAT_ROW;
DBUG_VOID_RETURN;
}
inline void clear_current_stmt_binlog_format_row()
{
DBUG_ENTER("clear_current_stmt_binlog_format_row");
current_stmt_binlog_format= BINLOG_FORMAT_STMT;
DBUG_VOID_RETURN;
}
inline void reset_current_stmt_binlog_format_row()
{
DBUG_ENTER("reset_current_stmt_binlog_format_row");
/*
If there are temporary tables, don't reset back to
statement-based. Indeed it could be that:
CREATE TEMPORARY TABLE t SELECT UUID(); # row-based
# and row-based does not store updates to temp tables
# in the binlog.
INSERT INTO u SELECT * FROM t; # stmt-based
and then the INSERT will fail as data inserted into t was not logged.
So we continue with row-based until the temp table is dropped.
If we are in a stored function or trigger, we mustn't reset in the
      middle of its execution (as the binary logging mode of a stored function
      or trigger is decided when it starts executing, depending for example on
      the caller (for a stored function: whether the caller is SELECT or
      INSERT/UPDATE/DELETE...)).
*/
DBUG_PRINT("debug",
("temporary_tables: %s, in_sub_stmt: %s, system_thread: %s",
YESNO(temporary_tables), YESNO(in_sub_stmt),
show_system_thread(system_thread)));
if (in_sub_stmt == 0)
{
if (variables.binlog_format == BINLOG_FORMAT_ROW)
set_current_stmt_binlog_format_row();
else if (temporary_tables == NULL)
clear_current_stmt_binlog_format_row();
}
DBUG_VOID_RETURN;
}
/**
    Set the current database; a deep copy of the given C string is made.
@param new_db a pointer to the new database name.
@param new_db_len length of the new database name.
    Initialize the current database from a NULL-terminated string of the
    given length. If we run out of memory, we free the current database and
    return TRUE. This way the user will notice the error, as there will be
no current database selected (in addition to the error message set by
malloc).
@note This operation just sets {db, db_length}. Switching the current
database usually involves other actions, like switching other database
    attributes including the security context. In the future, this operation
    will be made private and a more convenient interface will be provided.
@return Operation status
@retval FALSE Success
@retval TRUE Out-of-memory error
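
    Example (a sketch of a caller; the names are illustrative):

      if (thd->set_db(new_db_name.str, new_db_name.length))
        DBUG_RETURN(TRUE);                      // out of memory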
*/
bool set_db(const char *new_db, size_t new_db_len)
{
/* Do not reallocate memory if the current chunk is big enough. */
if (db && new_db && db_length >= new_db_len)
memcpy(db, new_db, new_db_len+1);
else
{
my_free(db);
if (new_db)
db= my_strndup(new_db, new_db_len, MYF(MY_WME | ME_FATALERROR));
else
db= NULL;
}
db_length= db ? new_db_len : 0;
return new_db && !db;
}
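/*
  Illustrative sketch, not part of the original header: a typical call to
  set_db() when switching the current database. The THD pointer 'thd' and
  the NUL-terminated buffer 'name' are assumptions of the example.

    if (thd->set_db(name, strlen(name)))
      return TRUE;        // allocation failed; my_strndup() reported it
    // thd->db now owns a deep copy, so 'name' may be freed by the caller.
*/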
/**
Set the current database; use shallow copy of C-string.
@param new_db a pointer to the new database name.
@param new_db_len length of the new database name.
@note This operation just sets {db, db_length}. Switching the current
database usually involves other actions, like switching other database
attributes, including the security context. In the future, this operation
will be made private and a more convenient interface will be provided.
*/
void reset_db(char *new_db, size_t new_db_len)
{
db= new_db;
db_length= new_db_len;
}
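/*
  Illustrative sketch, not from the original source: reset_db() does not
  copy, so the caller must keep the buffer alive for as long as it stays
  the current database. 'buf' and 'buf_len' are assumptions of the example.

    thd->reset_db(buf, buf_len);   // shallow copy: no allocation, no free
    ...
    thd->reset_db(NULL, 0);        // detach before 'buf' is freed
*/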
/*
Copy the current database to the argument. Use the current arena to
allocate memory for a deep copy: the current database may be freed after
a statement is parsed but before it's executed.
*/
bool copy_db_to(char **p_db, size_t *p_db_length)
{
if (db == NULL)
{
my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
return TRUE;
}
*p_db= strmake(db, db_length);
*p_db_length= db_length;
return FALSE;
}
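/*
  Illustrative sketch, not part of the original header: a caller that needs
  a stable copy of the current database name. 'db_name' and 'db_name_len'
  are assumptions of the example.

    char *db_name;
    size_t db_name_len;
    if (thd->copy_db_to(&db_name, &db_name_len))
      return TRUE;    // no current database: ER_NO_DB_ERROR was raised
    // db_name lives in the current arena and survives later db switches.
*/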
thd_scheduler scheduler;
public:
inline Internal_error_handler *get_internal_handler()
{ return m_internal_handler; }
/**
Add an internal error handler to the thread execution context.
@param handler the exception handler to add
*/
void push_internal_handler(Internal_error_handler *handler);
/**
Handle a sql condition.
@param sql_errno the condition error number
@param sqlstate the condition sqlstate
@param level the condition level
@param msg the condition message text
@param[out] cond_hdl the sql condition raised, if any
@return true if the condition is handled
*/
virtual bool handle_condition(uint sql_errno,
const char* sqlstate,
MYSQL_ERROR::enum_warning_level level,
const char* msg,
MYSQL_ERROR ** cond_hdl);
/**
Remove the error handler last pushed.
*/
Internal_error_handler *pop_internal_handler();
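/*
  Illustrative sketch, not from the original source: handlers form a stack,
  so each push_internal_handler() must be paired with pop_internal_handler(),
  including on error paths. 'My_error_handler' stands for any
  Internal_error_handler subclass.

    My_error_handler err_handler;
    thd->push_internal_handler(&err_handler);
    ... code whose conditions err_handler may intercept ...
    thd->pop_internal_handler();
*/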
/**
Raise an exception condition.
@param code the MYSQL_ERRNO error code of the error
*/
void raise_error(uint code);
/**
Raise an exception condition, with a formatted message.
@param code the MYSQL_ERRNO error code of the error
*/
void raise_error_printf(uint code, ...);
/**
Raise a completion condition (warning).
@param code the MYSQL_ERRNO error code of the warning
*/
void raise_warning(uint code);
/**
Raise a completion condition (warning), with a formatted message.
@param code the MYSQL_ERRNO error code of the warning
*/
void raise_warning_printf(uint code, ...);
/**
Raise a completion condition (note), with a fixed message.
@param code the MYSQL_ERRNO error code of the note
*/
void raise_note(uint code);
/**
Raise a completion condition (note), with a formatted message.
@param code the MYSQL_ERRNO error code of the note
*/
void raise_note_printf(uint code, ...);
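/*
  Illustrative sketch, not part of the original header: the printf-style
  variants take the arguments demanded by the error message's format string,
  while the plain variants are for messages without parameters. 'table_name'
  is an assumption of the example.

    thd->raise_warning_printf(code, table_name);  // message with one %s
    thd->raise_note(code);                        // fixed message
*/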
private:
/*
Only the implementation of the SIGNAL and RESIGNAL statements
is permitted to raise SQL conditions in a generic way,
or to raise them by bypassing handlers (RESIGNAL).
To raise a SQL condition, the code should use the public
raise_error() or raise_warning() methods provided by class THD.
*/
friend class Signal_common;
friend class Signal_statement;
friend class Resignal_statement;
friend void push_warning(THD*, MYSQL_ERROR::enum_warning_level, uint, const char*);
friend void my_message_sql(uint, const char *, myf);
/**
Raise a generic SQL condition.
@param sql_errno the condition error number
@param sqlstate the condition SQLSTATE
@param level the condition level
@param msg the condition message text
@return The condition raised, or NULL
*/
MYSQL_ERROR*
raise_condition(uint sql_errno,
const char* sqlstate,
MYSQL_ERROR::enum_warning_level level,
const char* msg);
/**
Raise a generic SQL condition, without activating any SQL condition
handlers.
This method is necessary to support the RESIGNAL statement,
which is allowed to bypass SQL exception handlers.
@param sql_errno the condition error number
@param sqlstate the condition SQLSTATE
@param level the condition level
@param msg the condition message text
@return The condition raised, or NULL
*/
MYSQL_ERROR*
raise_condition_no_handler(uint sql_errno,
const char* sqlstate,
MYSQL_ERROR::enum_warning_level level,
const char* msg);
public:
/** Overloaded to guard query/query_length fields */
virtual void set_statement(Statement *stmt);
/**
Assign a new value to thd->query and thd->query_id.
Protected with LOCK_thd_data mutex.
*/
void set_query(char *query_arg, uint32 query_length_arg);
void set_query_and_id(char *query_arg, uint32 query_length_arg,
query_id_t new_query_id);
void set_query_id(query_id_t new_query_id);
void set_open_tables(TABLE *open_tables_arg)
{
mysql_mutex_lock(&LOCK_thd_data);
open_tables= open_tables_arg;
mysql_mutex_unlock(&LOCK_thd_data);
}
void enter_locked_tables_mode(enum_locked_tables_mode mode_arg)
{
DBUG_ASSERT(locked_tables_mode == LTM_NONE);
mdl_context.set_trans_sentinel();
locked_tables_mode= mode_arg;
}
void leave_locked_tables_mode();
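/*
  Illustrative sketch, not from the original source: the two methods above
  are expected to bracket LOCK TABLES processing. LTM_LOCK_TABLES is one of
  the enum_locked_tables_mode values.

    thd->enter_locked_tables_mode(LTM_LOCK_TABLES);
    ... open and lock the tables named in the LOCK TABLES list ...
    thd->leave_locked_tables_mode();
*/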
int decide_logging_format(TABLE_LIST *tables);
void set_current_user_used() { current_user_used= TRUE; }
bool is_current_user_used() { return current_user_used; }
void clean_current_user_used() { current_user_used= FALSE; }
void get_definer(LEX_USER *definer);
void set_invoker(const LEX_STRING *user, const LEX_STRING *host)
{
invoker_user= *user;
invoker_host= *host;
}
LEX_STRING get_invoker_user() { return invoker_user; }
LEX_STRING get_invoker_host() { return invoker_host; }
bool has_invoker() { return invoker_user.length > 0; }
private:
/** The current internal error handler for this thread, or NULL. */
Internal_error_handler *m_internal_handler;
/**
The lex to hold the parsed tree of conventional (non-prepared) queries.
Whereas for prepared and stored procedure statements we use a separate lex
instance for each new query, for conventional statements we reuse
the same lex. (@see mysql_parse for details).
*/
LEX main_lex;
/**
This memory root is used for two purposes:
- for conventional queries, to allocate structures stored in main_lex
during parsing, and allocate runtime data (execution plan, etc.)
during execution.
- for prepared queries, only to allocate runtime data. The parsed
tree itself is reused between executions and thus is stored elsewhere.
*/
MEM_ROOT main_mem_root;
Warning_info main_warning_info;
Diagnostics_area main_da;
/**
It will be set TRUE if CURRENT_USER() is called in account management
statements or if a default definer is set in CREATE/ALTER SP, SF, Event,
TRIGGER or VIEW statements.
The current user will be binlogged into Query_log_event if current_user_used
is TRUE; it will be stored into invoker_host and invoker_user by the SQL thread.
*/
bool current_user_used;
/**
It points to the invoker in the Query_log_event.
The SQL thread uses it as the default definer in CREATE/ALTER SP, SF, Event,
TRIGGER or VIEW statements, or as the current user in account management
statements, if it is not NULL.
*/
LEX_STRING invoker_user;
LEX_STRING invoker_host;
};
/** A short cut for thd->stmt_da->set_ok_status(). */
inline void
my_ok(THD *thd, ulonglong affected_rows= 0, ulonglong id= 0,
const char *message= NULL)
{
thd->set_row_count_func(affected_rows);
thd->stmt_da->set_ok_status(thd, affected_rows, id, message);
}
/** A short cut for thd->stmt_da->set_eof_status(). */
inline void
my_eof(THD *thd)
{
thd->set_row_count_func(-1);
thd->stmt_da->set_eof_status(thd);
}
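/*
  Illustrative sketch, not part of the original header: a statement that
  produces no result set reports success with my_ok(), optionally passing an
  affected-row count; one that has streamed a result set terminates it with
  my_eof(). The counter 'deleted' is an assumption of the example.

    my_ok(thd, deleted);   // "Query OK, <deleted> rows affected"
    my_eof(thd);           // end of a result set that has been sent
*/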
#define tmp_disable_binlog(A) \
{ulonglong tmp_disable_binlog__save_options= (A)->variables.option_bits; \
(A)->variables.option_bits&= ~OPTION_BIN_LOG
#define reenable_binlog(A) (A)->variables.option_bits= tmp_disable_binlog__save_options;}
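/*
  Illustrative sketch, not from the original source: the two macros must be
  used as a pair in one scope, since tmp_disable_binlog() opens a block that
  reenable_binlog() closes. 'do_unlogged_work' is a hypothetical helper.

    tmp_disable_binlog(thd);
    error= do_unlogged_work(thd);   // statements here are not binlogged
    reenable_binlog(thd);
*/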
/*
Used to hold information about file and file structure in exchange
via non-DB file (...INTO OUTFILE..., ...LOAD DATA...)
XXX: We never call destructor for objects of this class.
*/
class sql_exchange :public Sql_alloc
{
public:
enum enum_filetype filetype; /* load XML, Added by Arnold & Erik */
char *file_name;
String *field_term,*enclosed,*line_term,*line_start,*escaped;
bool opt_enclosed;
bool dumpfile;
ulong skip_lines;
CHARSET_INFO *cs;
sql_exchange(char *name, bool dumpfile_flag,
enum_filetype filetype_arg= FILETYPE_CSV);
bool escaped_given(void);
};
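/*
  Illustrative sketch, not part of the original header: creating an exchange
  object for SELECT ... INTO OUTFILE '/tmp/data.txt'; the file name literal
  is an assumption of the example.

    sql_exchange *ex= new sql_exchange((char*) "/tmp/data.txt",
                                       0 /* dumpfile_flag */);
    // field_term, enclosed, line_term etc. are filled afterwards from the
    // FIELDS ... / LINES ... clauses, when given.
*/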
/*
This is used to get result from a select
*/
class JOIN;
class select_result :public Sql_alloc {
protected:
THD *thd;
SELECT_LEX_UNIT *unit;
public:
select_result();
virtual ~select_result() {};
virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
unit= u;
return 0;
}
virtual int prepare2(void) { return 0; }
/*
Because of peculiarities of the prepared statements protocol,
we need to know the number of columns in the result set (if
there is a result set) apart from sending the column metadata.
*/
virtual uint field_count(List<Item> &fields) const
{ return fields.elements; }
virtual bool send_result_set_metadata(List<Item> &list, uint flags)=0;
virtual bool send_data(List<Item> &items)=0;
virtual bool initialize_tables (JOIN *join=0) { return 0; }
virtual void send_error(uint errcode,const char *err);
virtual bool send_eof()=0;
/**
Check if this query returns a result set and therefore is allowed in
cursors and set an error message if it is not the case.
@retval FALSE success
@retval TRUE error, an error message is set
*/
virtual bool check_simple_select() const;
virtual void abort() {}
/*
Clean up the instance of this class for the next execution of a prepared
statement/stored procedure.
*/
virtual void cleanup();
void set_thd(THD *thd_arg) { thd= thd_arg; }
#ifdef EMBEDDED_LIBRARY
virtual void begin_dataset() {}
#else
void begin_dataset() {}
#endif
};
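/*
  Lifecycle sketch (illustrative; the driver function below is
  hypothetical, the real calls are spread over handle_select() and
  JOIN::exec(), and the Protocol::SEND_* flags are the ones typically
  passed): a select_result is prepared once, receives the result set
  metadata, then one send_data() call per row, and is finished with
  send_eof().
*/
static inline bool run_one_row_result(select_result *result,
                                      SELECT_LEX_UNIT *u,
                                      List<Item> &fields,
                                      List<Item> &row)
{
  if (result->prepare(fields, u) || result->prepare2())
    return TRUE;                                    /* fatal error */
  if (result->send_result_set_metadata(fields,
                                       Protocol::SEND_NUM_ROWS |
                                       Protocol::SEND_EOF))
    return TRUE;
  if (result->send_data(row))
    return TRUE;                    /* error while sending the row */
  return result->send_eof();
}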
/*
  Base class for select_result descendants which intercept and
  transform result set rows. As the rows are not sent to the client,
  sending of result set metadata should be suppressed as well.
*/
class select_result_interceptor: public select_result
{
public:
select_result_interceptor() {} /* Remove gcc warning */
uint field_count(List<Item> &fields) const { return 0; }
bool send_result_set_metadata(List<Item> &fields, uint flag) { return FALSE; }
};
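/*
  Minimal descendant sketch (illustrative; the class name is
  hypothetical): an interceptor only has to override send_data() and
  send_eof(), since field_count() and send_result_set_metadata() are
  already stubbed out above. Real interceptors (select_export,
  select_insert, ...) follow the same pattern.
*/
class select_row_counter: public select_result_interceptor
{
  ha_rows counted;
public:
  select_row_counter() :counted(0) {}
  bool send_data(List<Item> &items) { counted++; return FALSE; }
  bool send_eof() { return FALSE; }             /* nothing to flush */
};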
class select_send :public select_result {
  /**
    True if we have sent result set metadata to the client.
    In this case the client always expects us to end the result
    set with an eof or error packet.
  */
bool is_result_set_started;
public:
select_send() :is_result_set_started(FALSE) {}
bool send_result_set_metadata(List<Item> &list, uint flags);
bool send_data(List<Item> &items);
bool send_eof();
virtual bool check_simple_select() const { return FALSE; }
void abort();
virtual void cleanup();
};
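/*
  Error-path sketch (illustrative; fail_started_query() is a
  hypothetical helper): is_result_set_started determines how a failure
  is reported. Before metadata has been sent, a plain error packet
  suffices; after it, the client sits inside an open result set, which
  abort() still terminates correctly.
*/
static inline void fail_started_query(select_send *sender,
                                      bool metadata_was_sent)
{
  if (metadata_was_sent)
    sender->abort();   /* ends the started result set with an error */
  /* else: the caller just sends an ordinary error packet */
}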
class select_to_file :public select_result_interceptor {
protected:
sql_exchange *exchange;
File file;
IO_CACHE cache;
ha_rows row_count;
char path[FN_REFLEN];
public:
select_to_file(sql_exchange *ex) :exchange(ex), file(-1),row_count(0L)
{ path[0]=0; }
~select_to_file();
void send_error(uint errcode,const char *err);
bool send_eof();
void cleanup();
};
#define ESCAPE_CHARS "ntrb0ZN" // keep in sync with READ_INFO::unescape
/*
  Characters that may appear in the text representation of a numeric
  value.
*/
#define NUMERIC_CHARS ".0123456789e+-"
class select_export :public select_to_file {
uint field_term_length;
int field_sep_char,escape_char,line_sep_char;
  int field_term_char; // first char of FIELDS TERMINATED BY, or INT_MAX
  /*
    The is_ambiguous_field_sep field is true if the value of
    field_sep_char is one of the 'n', 't', 'r' etc. characters
    (see the READ_INFO::unescape method and the ESCAPE_CHARS
    constant value).
  */
  bool is_ambiguous_field_sep;
  /*
    The is_ambiguous_field_term field is true if field_sep_char is the
    first character of FIELDS TERMINATED BY (with ENCLOSED BY empty),
    and the output values may contain this character.
  */
  bool is_ambiguous_field_term;
  /*
    The is_unsafe_field_sep field is true if the value of
    field_sep_char is one of the '0'..'9', '+', '-', '.' and 'e'
    characters (see the NUMERIC_CHARS constant value).
  */
  bool is_unsafe_field_sep;
bool fixed_row_size;
CHARSET_INFO *write_cs; // output charset
public:
select_export(sql_exchange *ex) :select_to_file(ex) {}
~select_export();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
};
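/*
  Classification sketch (illustrative; the real checks live in
  select_export::prepare() in sql_class.cc, and this hypothetical
  helper assumes strchr() is available): how a separator character
  maps onto the two flags documented above.
*/
static inline void classify_field_sep(char field_sep,
                                      bool *ambiguous, bool *unsafe)
{
  /* 'n','t','r',... would collide with escape sequences on reload */
  *ambiguous= field_sep != 0 && strchr(ESCAPE_CHARS, field_sep) != NULL;
  /* digits, '.', 'e', '+', '-' can be glued onto numeric output */
  *unsafe=    field_sep != 0 && strchr(NUMERIC_CHARS, field_sep) != NULL;
}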
class select_dump :public select_to_file {
public:
select_dump(sql_exchange *ex) :select_to_file(ex) {}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
};
class select_insert :public select_result_interceptor {
public:
TABLE_LIST *table_list;
TABLE *table;
List<Item> *fields;
  ulonglong autoinc_value_of_last_inserted_row; // autogenerated or explicitly set
COPY_INFO info;
bool insert_into_view;
select_insert(TABLE_LIST *table_list_par,
TABLE *table_par, List<Item> *fields_par,
List<Item> *update_fields, List<Item> *update_values,
enum_duplicates duplic, bool ignore);
~select_insert();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
virtual int prepare2(void);
bool send_data(List<Item> &items);
virtual void store_values(List<Item> &values);
virtual bool can_rollback_data() { return 0; }
void send_error(uint errcode,const char *err);
bool send_eof();
void abort();
/* not implemented: select_insert is never re-used in prepared statements */
void cleanup();
};
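/*
  Flow sketch (illustrative; insert_one_row() is hypothetical and the
  real logic lives in sql_insert.cc): for every row produced by the
  SELECT part, send_data() stores the values into the target table's
  record buffer via the virtual store_values() and then writes the
  row. Descendants such as select_create retarget rows by overriding
  store_values() and prepare() only.
*/
static inline void insert_one_row(select_insert *si, List<Item> &values)
{
  si->store_values(values);      /* fill table->record[0] from items */
  /* the real code then writes the buffer and updates si->info
     counters before the next send_data() call */
}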
class select_create: public select_insert {
ORDER *group;
TABLE_LIST *create_table;
HA_CREATE_INFO *create_info;
TABLE_LIST *select_tables;
Alter_info *alter_info;
Field **field;
/* lock data for tmp table */
MYSQL_LOCK *m_lock;
/* m_lock or thd->extra_lock */
MYSQL_LOCK **m_plock;
public:
select_create (TABLE_LIST *table_arg,
HA_CREATE_INFO *create_info_par,
Alter_info *alter_info_arg,
List<Item> &select_fields,enum_duplicates duplic, bool ignore,
TABLE_LIST *select_tables_arg)
:select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore),
create_table(table_arg),
create_info(create_info_par),
select_tables(select_tables_arg),
alter_info(alter_info_arg),
m_plock(NULL)
{}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
int binlog_show_create_table(TABLE **tables, uint count);
void store_values(List<Item> &values);
void send_error(uint errcode,const char *err);
bool send_eof();
void abort();
2006-06-04 17:52:22 +02:00
virtual bool can_rollback_data() { return 1; }
// Needed for access from local class MY_HOOKS in prepare(), since thd is protected.
const THD *get_thd(void) { return thd; }
const HA_CREATE_INFO *get_create_info() { return create_info; }
int prepare2(void) { return 0; }
};
#include <myisam.h>
/*
Parameters used to create temporary tables when executing SELECTs.
NOTE
This structure is copied using memcpy as a part of JOIN.
*/
class TMP_TABLE_PARAM :public Sql_alloc
{
private:
/* Prevent use of these (not safe because of lists and copy_field) */
TMP_TABLE_PARAM(const TMP_TABLE_PARAM &);
void operator=(TMP_TABLE_PARAM &);
public:
List<Item> copy_funcs;
List<Item> save_copy_funcs;
Copy_field *copy_field, *copy_field_end;
Copy_field *save_copy_field, *save_copy_field_end;
uchar *group_buff;
Item **items_to_copy; /* Fields in tmp table */
MI_COLUMNDEF *recinfo,*start_recinfo;
KEY *keyinfo;
ha_rows end_write_records;
/**
Number of normal fields in the query, including those referred to
from aggregate functions. Hence, "SELECT `field1`,
SUM(`field2`) from t1" sets this counter to 2.
@see count_field_types
*/
uint field_count;
/**
Number of fields in the query that have functions. Includes both
aggregate functions (e.g., SUM) and non-aggregates (e.g., RAND).
Also counts functions referred to from aggregate functions, i.e.,
"SELECT SUM(RAND())" sets this counter to 2.
@see count_field_types
*/
uint func_count;
/**
Number of fields in the query that have aggregate functions. Note
that the optimizer may choose to optimize away these fields by
replacing them with constants, in which case sum_func_count will
need to be updated.
@see opt_sum_query, count_field_types
*/
uint sum_func_count;
uint hidden_field_count;
uint group_parts,group_length,group_null_parts;
uint quick_group;
bool using_indirect_summary_function;
/* If >0 convert all blob fields to varchar(convert_blob_length) */
uint convert_blob_length;
CHARSET_INFO *table_charset;
bool schema_table;
/*
True if GROUP BY and its aggregate functions are already computed
by a table access method (e.g. by loose index scan). In this case
query execution should not perform aggregation and should treat
aggregate functions as normal functions.
*/
bool precomputed_group_by;
bool force_copy_fields;
TMP_TABLE_PARAM()
:copy_field(0), group_parts(0),
group_length(0), group_null_parts(0), convert_blob_length(0),
schema_table(0), precomputed_group_by(0), force_copy_fields(0)
{}
~TMP_TABLE_PARAM()
{
cleanup();
}
void init(void);
inline void cleanup(void)
{
if (copy_field) /* Fix for Intel compiler */
{
delete [] copy_field;
save_copy_field= copy_field= 0;
}
}
};
class select_union :public select_result_interceptor
{
TMP_TABLE_PARAM tmp_table_param;
public:
TABLE *table;
select_union() :table(0) {}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
bool send_eof();
bool flush();
bool create_result_table(THD *thd, List<Item> *column_types,
bool is_distinct, ulonglong options,
const char *alias);
};
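/*
  A hedged lifecycle sketch, not taken verbatim from any call site (the
  real driver lives in sql_union.cc):

    select_union result;
    result.create_result_table(thd, &types, distinct, options, "tmp-union");
    // each SELECT in the UNION then streams its rows:
    //   result.send_data(row_items);
    result.send_eof();          // flush pending rows to the temporary table
    // ... the UNION result is afterwards read back from result.table
*/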
/* Base subselect interface class */
class select_subselect :public select_result_interceptor
{
protected:
Item_subselect *item;
public:
select_subselect(Item_subselect *item);
bool send_data(List<Item> &items)=0;
bool send_eof() { return 0; }
};
/* Single value subselect interface class */
class select_singlerow_subselect :public select_subselect
{
public:
select_singlerow_subselect(Item_subselect *item_arg)
:select_subselect(item_arg)
{}
bool send_data(List<Item> &items);
};
/*
  Used in the independent ALL/ANY optimisation: for a predicate such as
  "x > ALL (SELECT y ...)" only the MAX (or, depending on the operator,
  MIN) value of the subquery result needs to be kept, which this
  interceptor computes via the cmp_xxx() methods below.
*/
class select_max_min_finder_subselect :public select_subselect
{
Item_cache *cache;
bool (select_max_min_finder_subselect::*op)();
bool fmax;
public:
select_max_min_finder_subselect(Item_subselect *item_arg, bool mx)
:select_subselect(item_arg), cache(0), fmax(mx)
{}
void cleanup();
bool send_data(List<Item> &items);
bool cmp_real();
bool cmp_int();
bool cmp_decimal();
bool cmp_str();
};
/* EXISTS subselect interface class */
class select_exists_subselect :public select_subselect
{
public:
select_exists_subselect(Item_subselect *item_arg)
:select_subselect(item_arg){}
bool send_data(List<Item> &items);
};
/* Structs used when sorting */
typedef struct st_sort_field {
Field *field; /* Field to sort */
Item *item; /* Item if not sorting fields */
uint length; /* Length of sort field */
uint suffix_length; /* Length suffix (0-4) */
Item_result result_type; /* Type of item */
bool reverse; /* if descending sort */
bool need_strxnfrm; /* If we have to use strxnfrm() */
} SORT_FIELD;
typedef struct st_sort_buffer {
uint index; /* 0 or 1 */
uint sort_orders;
uint change_pos; /* If sort-fields changed */
char **buff;
SORT_FIELD *sortorder;
} SORT_BUFFER;
/* Structure for db & table in sql_yacc */
class Table_ident :public Sql_alloc
{
public:
LEX_STRING db;
LEX_STRING table;
SELECT_LEX_UNIT *sel;
inline Table_ident(THD *thd, LEX_STRING db_arg, LEX_STRING table_arg,
bool force)
:table(table_arg), sel((SELECT_LEX_UNIT *)0)
{
if (!force && (thd->client_capabilities & CLIENT_NO_SCHEMA))
db.str=0;
else
db= db_arg;
}
inline Table_ident(LEX_STRING table_arg)
:table(table_arg), sel((SELECT_LEX_UNIT *)0)
{
db.str=0;
}
/*
This constructor is used only for the case when we create a derived
table. A derived table has no name and doesn't belong to any database.
Later, if there was an alias specified for the table, it will be set
by add_table_to_list.
*/
inline Table_ident(SELECT_LEX_UNIT *s) : sel(s)
{
/* We must have a table name here as this is used with add_table_to_list */
db.str= empty_c_string; /* may later be passed to casedn_str() */
db.length= 0;
table.str= internal_table_name;
table.length=1;
}
bool is_derived_table() const { return test(sel); }
inline void change_db(char *db_name)
{
db.str= db_name; db.length= (uint) strlen(db_name);
}
};
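/*
  Illustrative examples: "db1.t1" in a query yields db="db1", table="t1",
  sel=NULL; plain "t1" leaves db.str == 0 so the current database is used;
  a derived table "(SELECT ...) dt" is built through the SELECT_LEX_UNIT
  constructor, after which is_derived_table() returns true and the alias
  is filled in later by add_table_to_list().
*/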
// This class is needed for the user_vars hash
class user_var_entry
{
public:
user_var_entry() {} /* Remove gcc warning */
LEX_STRING name;
char *value;
ulong length;
query_id_t update_query_id, used_query_id;
Item_result type;
bool unsigned_flag;
double val_real(my_bool *null_value);
longlong val_int(my_bool *null_value) const;
String *val_str(my_bool *null_value, String *str, uint decimals);
my_decimal *val_decimal(my_bool *null_value, my_decimal *result);
DTCollation collation;
};
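/*
  One user_var_entry exists per @variable and connection, keyed by name in
  the user_vars hash of THD; e.g. "SET @x= 1" creates or updates the entry
  for "x", and a later "SELECT @x + 1" reads it back via val_int().
*/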
/*
Unique -- class for removing duplicate values.
Puts all values into a TREE. If the tree becomes too big, it is dumped
to a temporary file. The user can request sorted values, or just
iterate through them. In the latter case tree merging is performed in
memory simultaneously with iteration, so it should be ~2-3x faster.
(A hedged usage sketch follows the class definition below.)
*/
class Unique :public Sql_alloc
{
DYNAMIC_ARRAY file_ptrs;
ulong max_elements;
ulonglong max_in_memory_size;
IO_CACHE file;
TREE tree;
uchar *record_pointers;
bool flush();
uint size;
public:
ulong elements;
Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
uint size_arg, ulonglong max_in_memory_size_arg);
~Unique();
ulong elements_in_tree() { return tree.elements_in_tree; }
inline bool unique_add(void *ptr)
{
DBUG_ENTER("unique_add");
DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements));
if (tree.elements_in_tree > max_elements && flush())
DBUG_RETURN(1);
DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg));
}
bool get(TABLE *table);
static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
ulonglong max_in_memory_size);
inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size,
ulonglong max_in_memory_size)
{
register ulonglong max_elems_in_tree=
(1 + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
}
void reset();
bool walk(tree_walk_action action, void *walk_action_arg);
uint get_size() const { return size; }
ulonglong get_max_in_memory_size() const { return max_in_memory_size; }
friend int unique_write_to_file(uchar* key, element_count count, Unique *unique);
friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique);
};
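/*
  A minimal usage sketch, not taken verbatim from any call site (real
  users include multi-table DELETE in sql_delete.cc and index_merge in
  opt_range.cc); 'rowid_cmp' and 'more_rows' are hypothetical names:

    Unique unique(rowid_cmp, (void*) table->file,
                  table->file->ref_length,
                  thd->variables.sortbuff_size);
    while (more_rows())                     // hypothetical scan loop
      unique.unique_add(table->file->ref);  // may flush the tree to file
    unique.get(table);                      // merge runs, duplicates gone
*/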
class multi_delete :public select_result_interceptor
{
TABLE_LIST *delete_tables, *table_being_deleted;
Unique **tempfiles;
ha_rows deleted, found;
uint num_of_tables;
int error;
bool do_delete;
/* True if at least one table we delete from is transactional */
bool transactional_tables;
/* True if at least one table we delete from is not transactional */
bool normal_tables;
bool delete_while_scanning;
/*
Error handling (rollback and binlogging) can happen in send_eof(),
so send_error() needs to be able to find out afterwards whether it
has already been done.
*/
bool error_handled;
public:
multi_delete(TABLE_LIST *dt, uint num_of_tables);
~multi_delete();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
void send_error(uint errcode,const char *err);
int do_deletes();
int do_table_deletes(TABLE *table, bool ignore);
bool send_eof();
inline ha_rows num_deleted()
{
return deleted;
}
virtual void abort();
};
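/*
  How the members above cooperate (a sketch; the real flow lives in
  sql_delete.cc): while the join is scanned, send_data() deletes matching
  rows from the first table at once (when delete_while_scanning is set)
  and saves the row ids of the remaining tables in 'tempfiles' (one
  Unique per table); when the scan is done, do_deletes() replays the
  saved row ids through do_table_deletes(), and send_eof() handles
  binlogging and the commit/rollback decision based on
  transactional_tables/normal_tables.
*/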
class multi_update :public select_result_interceptor
{
TABLE_LIST *all_tables; /* query/update command tables */
TABLE_LIST *leaves; /* list of leaves of the join table tree */
TABLE_LIST *update_tables, *table_being_updated;
TABLE **tmp_tables, *main_table, *table_to_update;
TMP_TABLE_PARAM *tmp_table_param;
ha_rows updated, found;
List <Item> *fields, *values;
List <Item> **fields_for_table, **values_for_table;
uint table_count;
/*
List of tables referenced in the CHECK OPTION condition of
the updated view excluding the updated table.
*/
List <TABLE> unupdated_check_opt_tables;
Copy_field *copy_field;
enum enum_duplicates handle_duplicates;
bool do_update, trans_safe;
/* True if the update operation has made a change in a transactional table */
bool transactional_tables;
bool ignore;
/*
Error handling (rollback and binlogging) can happen in send_eof(),
so send_error() needs to be able to find out afterwards whether it
has already been done.
*/
bool error_handled;
public:
multi_update(TABLE_LIST *ut, TABLE_LIST *leaves_list,
List<Item> *fields, List<Item> *values,
enum_duplicates handle_duplicates, bool ignore);
~multi_update();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
void send_error(uint errcode,const char *err);
int do_updates();
bool send_eof();
inline ha_rows num_found()
{
return found;
}
inline ha_rows num_updated()
{
return updated;
}
virtual void abort();
};
class my_var : public Sql_alloc {
public:
LEX_STRING s;
#ifndef DBUG_OFF
/*
Routine to which this variable belongs. Used for checking that the
correct runtime context is used for variable handling.
*/
sp_head *sp;
#endif
bool local;
uint offset;
enum_field_types type;
my_var (LEX_STRING& j, bool i, uint o, enum_field_types t)
:s(j), local(i), offset(o), type(t)
{}
~my_var() {}
};
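/*
  Interceptor behind SELECT ... INTO @var1, @var2 (and INTO local variables
  of stored routines): send_data() assigns each select-list value to the
  matching my_var in var_list, and a second incoming row raises the
  "Result consisted of more than one row" error.
*/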
class select_dumpvar :public select_result_interceptor {
ha_rows row_count;
public:
List<my_var> var_list;
select_dumpvar() { var_list.empty(); row_count= 0;}
~select_dumpvar() {}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
bool send_eof();
virtual bool check_simple_select() const;
void cleanup();
};
/* Bits in sql_command_flags */
#define CF_CHANGES_DATA (1U << 0)
/* The 2nd bit is unused -- it used to be CF_HAS_ROW_COUNT. */
#define CF_STATUS_COMMAND (1U << 2)
#define CF_SHOW_TABLE_COMMAND (1U << 3)
#define CF_WRITE_LOGS_COMMAND (1U << 4)
/**
Must be set for SQL statements that may contain
Item expressions and/or use joins and tables.
Indicates that the parse tree of such a statement may
contain rule-based optimizations that depend on metadata
(e.g. the number of columns in a table), and consequently
that the statement must be re-prepared whenever
referenced metadata changes. Must not be set for
statements that themselves change metadata, e.g. RENAME,
ALTER and other DDL, since that would trigger constant
re-preparation. Consequently, complex item expressions and
joins are currently prohibited in these statements.
*/
#define CF_REEXECUTION_FRAGILE (1U << 5)
/**
Implicitly commit before the SQL statement is executed.
Statements marked with this flag will cause any active
transaction to end (commit) before proceeding with the
command execution.
This flag should be set for statements that probably cannot
be rolled back, or that do not expect any previously acquired
metadata locks on tables.
*/
#define CF_IMPLICT_COMMIT_BEGIN (1U << 6)
/**
Implicitly commit after the SQL statement.
Statements marked with this flag are automatically committed
at the end of the statement.
This flag should be set for statements that will implicitly
open and take metadata locks on system tables that should not
be carried for the whole duration of an active transaction.
*/
#define CF_IMPLICIT_COMMIT_END (1U << 7)
/**
CF_IMPLICT_COMMIT_BEGIN and CF_IMPLICIT_COMMIT_END are used
to ensure that the active transaction is implicitly committed
before and after every DDL statement and any statement that
modifies our currently non-transactional system tables.
*/
#define CF_AUTO_COMMIT_TRANS (CF_IMPLICT_COMMIT_BEGIN | CF_IMPLICIT_COMMIT_END)
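/*
  A hedged example of how these bits are combined per statement type (the
  authoritative assignments are made in init_update_queries() in
  sql_parse.cc; the exact flag set below is illustrative only):

    sql_command_flags[SQLCOM_CREATE_TABLE]|=
      CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;

  i.e. CREATE TABLE changes data and implicitly commits the active
  transaction both before and after it executes.
*/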
/**
Diagnostic statement.
Diagnostic statements:
- SHOW WARNINGS
- SHOW ERRORS
- GET DIAGNOSTICS (WL#2111)
do not modify the diagnostics area during execution.
*/
#define CF_DIAGNOSTIC_STMT (1U << 8)
/**
SQL statements that must be protected against impending global read lock
to prevent deadlock. This deadlock could otherwise happen if the statement
starts waiting for the GRL to go away inside mysql_lock_tables while at the
same time having "old" opened tables. The thread holding the GRL can be
waiting for these "old" opened tables to be closed, causing a deadlock
(FLUSH TABLES WITH READ LOCK).
*/
#define CF_PROTECT_AGAINST_GRL (1U << 10)
/**
Identifies statements that may generate row events
and that may end up in the binary log.
*/
#define CF_CAN_GENERATE_ROW_EVENTS (1U << 11)
/* Bits in server_command_flags */
/**
Skip the increase of the global query id counter. Commonly set for
commands that are stateless (i.e. that will not cause any change to
the server's internal state).
*/
#define CF_SKIP_QUERY_ID (1U << 0)
/**
Skip the increase of the number of statements that clients have
sent to the server. Commonly used for commands that will cause
a statement to be executed even though the statement might not have
been sent by the user directly (e.g. from within a stored procedure).
*/
#define CF_SKIP_QUESTIONS (1U << 1)
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
STATUS_VAR *dec_var);
void mark_transaction_to_rollback(THD *thd, bool all);
/*
This prototype is placed here instead of in item_func.h because it
depends on the definition of enum_sql_command, which is in this
file.
*/
int get_var_with_binlog(THD *thd, enum_sql_command sql_command,
LEX_STRING &name, user_var_entry **out_entry);
/* Inline functions */
inline bool add_item_to_list(THD *thd, Item *item)
{
return thd->lex->current_select->add_item_to_list(thd, item);
}
inline bool add_value_to_list(THD *thd, Item *value)
{
return thd->lex->value_list.push_back(value);
}
inline bool add_order_to_list(THD *thd, Item *item, bool asc)
{
return thd->lex->current_select->add_order_to_list(thd, item, asc);
}
inline bool add_group_to_list(THD *thd, Item *item, bool asc)
{
return thd->lex->current_select->add_group_to_list(thd, item, asc);
}
#endif /* MYSQL_SERVER */
/**
The meat of thd_proc_info(THD*, char*), a macro that packs the last
three calling-info parameters.
*/
extern "C"
const char *set_thd_proc_info(void *thd_arg, const char *info,
const char *calling_func,
const char *calling_file,
const unsigned int calling_line);
#define thd_proc_info(thd, msg) \
set_thd_proc_info(thd, msg, __func__, __FILE__, __LINE__)
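/*
  Usage sketch: set_thd_proc_info() returns the previous message, so a
  caller typically saves and restores it around a processing stage, e.g.:

    const char *old_msg= thd_proc_info(thd, "Sorting result");
    // ... work shown as "Sorting result" in SHOW PROCESSLIST ...
    thd_proc_info(thd, old_msg);
*/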
#endif /* SQL_CLASS_INCLUDED */