mariadb/sql/sql_trigger.cc


/* Copyright (C) 2004-2005 MySQL AB, 2008-2009 Sun Microsystems, Inc

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
#define MYSQL_LEX 1
#include "my_global.h"                          /* NO_EMBEDDED_ACCESS_CHECKS */
#include "sql_priv.h"
#include "unireg.h"
#include "sp_head.h"
#include "sql_trigger.h"
#include "sql_parse.h"                          // parse_sql
#include "parse_file.h"
#include "sp.h"
#include "sql_base.h"                           // find_temporary_table
#include "lock.h"                               // wait_if_global_read_lock,
                                                // start_waiting_global_read_lock
#include "sql_show.h"                           // append_definer, append_identifier
#include "sql_table.h"                          // build_table_filename,
                                                // check_n_cut_mysql50_prefix
#include "sql_db.h"                             // get_default_db_collation
#include "sql_acl.h"                            // *_ACL, is_acl_user
#include "sql_handler.h"                        // mysql_ha_rm_tables
/*************************************************************************/
template <class T>
inline T *alloc_type(MEM_ROOT *m)
{
  return (T *) alloc_root(m, sizeof (T));
}

/*
  NOTE: Since alloc_type() is declared as inline, calls to it should be
  inlined by the compiler, so an out-of-line implementation of alloc_type()
  is not strictly needed. However, let's put explicit instantiations into
  the object file just in case of old compilers (e.g. old MS compilers)
  that fail to inline.
*/

template LEX_STRING *alloc_type<LEX_STRING>(MEM_ROOT *m);
template ulonglong *alloc_type<ulonglong>(MEM_ROOT *m);

inline LEX_STRING *alloc_lex_string(MEM_ROOT *m)
{
  return alloc_type<LEX_STRING>(m);
}
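
/*
  Usage sketch (illustrative; the names below are hypothetical): callers
  typically pair alloc_lex_string() with strmake_root() to build a
  LEX_STRING on the same MEM_ROOT:

    LEX_STRING *s= alloc_lex_string(mem_root);
    if (s)
    {
      s->str= strmake_root(mem_root, src, len);
      s->length= len;
    }
*/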
/*************************************************************************/
/**
  Trigger_creation_ctx -- creation context of triggers.
*/

class Trigger_creation_ctx : public Stored_program_creation_ctx,
                             public Sql_alloc
{
public:
  static Trigger_creation_ctx *create(THD *thd,
                                      const char *db_name,
                                      const char *table_name,
                                      const LEX_STRING *client_cs_name,
                                      const LEX_STRING *connection_cl_name,
                                      const LEX_STRING *db_cl_name);

public:
  virtual Stored_program_creation_ctx *clone(MEM_ROOT *mem_root)
  {
    return new (mem_root) Trigger_creation_ctx(m_client_cs,
                                               m_connection_cl,
                                               m_db_cl);
  }

protected:
  virtual Object_creation_ctx *create_backup_ctx(THD *thd) const
  {
    return new Trigger_creation_ctx(thd);
  }

private:
  Trigger_creation_ctx(THD *thd)
    :Stored_program_creation_ctx(thd)
  { }

  Trigger_creation_ctx(CHARSET_INFO *client_cs,
                       CHARSET_INFO *connection_cl,
                       CHARSET_INFO *db_cl)
    :Stored_program_creation_ctx(client_cs, connection_cl, db_cl)
  { }
};
/**************************************************************************
  Trigger_creation_ctx implementation.
**************************************************************************/

Trigger_creation_ctx *
Trigger_creation_ctx::create(THD *thd,
                             const char *db_name,
                             const char *table_name,
                             const LEX_STRING *client_cs_name,
                             const LEX_STRING *connection_cl_name,
                             const LEX_STRING *db_cl_name)
{
  CHARSET_INFO *client_cs;
  CHARSET_INFO *connection_cl;
  CHARSET_INFO *db_cl;

  bool invalid_creation_ctx= FALSE;

  if (resolve_charset(client_cs_name->str,
                      thd->variables.character_set_client,
                      &client_cs))
  {
    sql_print_warning("Trigger for table '%s'.'%s': "
                      "invalid character_set_client value (%s).",
                      (const char *) db_name,
                      (const char *) table_name,
                      (const char *) client_cs_name->str);

    invalid_creation_ctx= TRUE;
  }

  if (resolve_collation(connection_cl_name->str,
                        thd->variables.collation_connection,
                        &connection_cl))
  {
    sql_print_warning("Trigger for table '%s'.'%s': "
                      "invalid collation_connection value (%s).",
                      (const char *) db_name,
                      (const char *) table_name,
                      (const char *) connection_cl_name->str);

    invalid_creation_ctx= TRUE;
  }

  if (resolve_collation(db_cl_name->str, NULL, &db_cl))
  {
    sql_print_warning("Trigger for table '%s'.'%s': "
                      "invalid database_collation value (%s).",
                      (const char *) db_name,
                      (const char *) table_name,
                      (const char *) db_cl_name->str);

    invalid_creation_ctx= TRUE;
  }

  if (invalid_creation_ctx)
  {
    push_warning_printf(thd,
                        MYSQL_ERROR::WARN_LEVEL_WARN,
                        ER_TRG_INVALID_CREATION_CTX,
                        ER(ER_TRG_INVALID_CREATION_CTX),
                        (const char *) db_name,
                        (const char *) table_name);
  }

  /*
    If we failed to resolve the database collation, load the default one
    from the disk.
  */
  if (!db_cl)
    db_cl= get_default_db_collation(thd, db_name);

  return new Trigger_creation_ctx(client_cs, connection_cl, db_cl);
}
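
/*
  Usage sketch (illustrative, based on the generic Object_creation_ctx
  protocol rather than on code shown here): the context saved in a
  Trigger_creation_ctx is passed to parse_sql() when the trigger body is
  re-compiled, so the trigger is parsed in the environment it was created
  in:

    parse_sql(thd, &parser_state, creation_ctx);

  parse_sql() switches the THD to the creation context for the duration of
  the parse and restores the previous context afterwards.
*/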
/*************************************************************************/
static const LEX_STRING triggers_file_type=
  { C_STRING_WITH_LEN("TRIGGERS") };

const char * const TRG_EXT= ".TRG";
/**
  Table of .TRG file field descriptors.

  We keep the set of fields here minimal because in the near future .TRG
  files will be merged into .FRM files (so we don't need fields like md5
  or created).
*/
static File_option triggers_file_parameters[]=
{
  {
    { C_STRING_WITH_LEN("triggers") },
    my_offsetof(class Table_triggers_list, definitions_list),
    FILE_OPTIONS_STRLIST
  },
  {
    { C_STRING_WITH_LEN("sql_modes") },
    my_offsetof(class Table_triggers_list, definition_modes_list),
    FILE_OPTIONS_ULLLIST
  },
  {
    { C_STRING_WITH_LEN("definers") },
    my_offsetof(class Table_triggers_list, definers_list),
    FILE_OPTIONS_STRLIST
  },
  {
    { C_STRING_WITH_LEN("client_cs_names") },
    my_offsetof(class Table_triggers_list, client_cs_names),
    FILE_OPTIONS_STRLIST
  },
  {
    { C_STRING_WITH_LEN("connection_cl_names") },
    my_offsetof(class Table_triggers_list, connection_cl_names),
    FILE_OPTIONS_STRLIST
  },
  {
    { C_STRING_WITH_LEN("db_cl_names") },
    my_offsetof(class Table_triggers_list, db_cl_names),
    FILE_OPTIONS_STRLIST
  },
  { { 0, 0 }, 0, FILE_OPTIONS_STRING }
};
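
/*
  An illustrative .TRG file built from the descriptors above (values are
  hypothetical; the exact quoting is produced by the parse_file API):

    TYPE=TRIGGERS
    triggers='CREATE DEFINER=`root`@`localhost` trigger t1_bi before insert on t1 for each row set @a:=1'
    sql_modes=0
    definers='root@localhost'
    client_cs_names='latin1'
    connection_cl_names='latin1_swedish_ci'
    db_cl_names='latin1_swedish_ci'
*/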
File_option sql_modes_parameters=
{
  { C_STRING_WITH_LEN("sql_modes") },
  my_offsetof(class Table_triggers_list, definition_modes_list),
  FILE_OPTIONS_ULLLIST
};
/**
  This must be kept up to date whenever a new option is added to the list
  above, as it specifies the number of required parameters of the trigger
  in a .TRG file.
*/
static const int TRG_NUM_REQUIRED_PARAMETERS= 6;
/*
  Structure representing contents of .TRN file which is used to support
  the database-wide trigger namespace.
*/

struct st_trigname
{
  LEX_STRING trigger_table;
};
static const LEX_STRING trigname_file_type=
  { C_STRING_WITH_LEN("TRIGGERNAME") };

const char * const TRN_EXT= ".TRN";

static File_option trigname_file_parameters[]=
{
  {
    { C_STRING_WITH_LEN("trigger_table") },
    offsetof(struct st_trigname, trigger_table),
    FILE_OPTIONS_ESTRING
  },
  { { 0, 0 }, 0, FILE_OPTIONS_STRING }
};
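
/*
  An illustrative .TRN file (hypothetical content). The file is named
  after the trigger (e.g. t1_bi.TRN) and simply maps the trigger name to
  its subject table, which is what gives triggers a database-wide
  namespace:

    TYPE=TRIGGERNAME
    trigger_table=t1
*/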
const LEX_STRING trg_action_time_type_names[]=
{
  { C_STRING_WITH_LEN("BEFORE") },
  { C_STRING_WITH_LEN("AFTER") }
};

const LEX_STRING trg_event_type_names[]=
{
  { C_STRING_WITH_LEN("INSERT") },
  { C_STRING_WITH_LEN("UPDATE") },
  { C_STRING_WITH_LEN("DELETE") }
};
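
/*
  Note (assumed from how these arrays are used elsewhere in the server):
  both arrays are indexed by the corresponding enum values, i.e.
  trg_action_time_type (TRG_ACTION_BEFORE, TRG_ACTION_AFTER) and
  trg_event_type (TRG_EVENT_INSERT, TRG_EVENT_UPDATE, TRG_EVENT_DELETE).
*/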
class Handle_old_incorrect_sql_modes_hook: public Unknown_key_hook
{
private:
  char *path;
public:
  Handle_old_incorrect_sql_modes_hook(char *file_path)
    :path(file_path)
  {};
  virtual bool process_unknown_string(char *&unknown_key, uchar* base,
                                      MEM_ROOT *mem_root, char *end);
};
class Handle_old_incorrect_trigger_table_hook: public Unknown_key_hook
{
public:
  Handle_old_incorrect_trigger_table_hook(char *file_path,
                                          LEX_STRING *trigger_table_arg)
    :path(file_path), trigger_table_value(trigger_table_arg)
  {};
  virtual bool process_unknown_string(char *&unknown_key, uchar* base,
                                      MEM_ROOT *mem_root, char *end);
private:
  char *path;
  LEX_STRING *trigger_table_value;
};
/**
  Create or drop trigger for table.

  @param thd     current thread context (including trigger definition in LEX)
  @param tables  table list containing one table for which trigger is created.
  @param create  whether we create (TRUE) or drop (FALSE) the trigger

  @note
    This function is mainly responsible for opening and locking of table and
    invalidation of all its instances in table cache after trigger creation.
    Real work on trigger creation/dropping is done inside Table_triggers_list
    methods.

  @todo
    TODO: We should check if user has TRIGGER privilege for table here.
    Now we just require SUPER privilege for creating/dropping because
    we don't have proper privilege checking for triggers in place yet.

  @retval FALSE Success
  @retval TRUE  error
*/
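
/*
  Illustrative statements that are handled by this function (trigger and
  table names hypothetical):

    CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW SET @a:= 1;
    DROP TRIGGER t1_bi;
*/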
bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
{
  /*
    FIXME: The code below takes too many different paths depending on the
    'create' flag, so that the justification for a single function
    'mysql_create_or_drop_trigger', compared to two separate functions
    'mysql_create_trigger' and 'mysql_drop_trigger', is not apparent.
    This is a good candidate for a minor refactoring.
  */
  TABLE *table;
  bool result= TRUE;
  String stmt_query;
  bool lock_upgrade_done= FALSE;
  MDL_ticket *mdl_ticket= NULL;
  Query_tables_list backup;

  DBUG_ENTER("mysql_create_or_drop_trigger");

  /* Charset of the buffer for statement must be system one. */
  stmt_query.set_charset(system_charset_info);

  /*
    QQ: This function could be merged into mysql_alter_table().
    But do we want this?
  */

  /*
    Note that once we have a check for the TRIGGER privilege in place, we
    won't need the second part of the condition below, since the
    check_access() function also checks that the db is specified.
  */
  if (!thd->lex->spname->m_db.length || (create && !tables->db_length))
  {
    my_error(ER_NO_DB_ERROR, MYF(0));
    DBUG_RETURN(TRUE);
  }
  /*
    We don't allow creating triggers on tables in the 'mysql' schema
  */
  if (create && !my_strcasecmp(system_charset_info, "mysql", tables->db))
  {
    my_error(ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA, MYF(0));
    DBUG_RETURN(TRUE);
  }
  /*
    There is no DETERMINISTIC clause for triggers, so we can't check it.
    But a trigger can in theory be used to do nasty things (if it supported
    DROP, for example), so we do the privilege check. For now there is
    already a stronger test right above; but when that stronger test is
    removed, the test below will still hold. Triggers have the same nature
    as functions regarding binlogging: their body is implicitly binlogged,
    so they share the same danger, and trust_function_creators applies to
    them too.
  */
  if (!trust_function_creators && mysql_bin_log.is_open() &&
      !(thd->security_ctx->master_access & SUPER_ACL))
  {
    my_error(ER_BINLOG_CREATE_ROUTINE_NEED_SUPER, MYF(0));
    DBUG_RETURN(TRUE);
  }
  /*
    We don't want to perform our operations while a global read lock is
    held, so we have to wait until it ends and then prevent it from
    occurring again until we are done, unless we are under LOCK TABLES.
    (Acquiring LOCK_open is not enough because the global read lock is
    held without holding LOCK_open.)
  */
  if (!thd->locked_tables_mode &&
      thd->global_read_lock.wait_if_global_read_lock(thd, FALSE, TRUE))
    DBUG_RETURN(TRUE);
  if (!create)
  {
    bool if_exists= thd->lex->drop_if_exists;

    /*
      Protect the query table list from the temporary and potentially
      destructive changes necessary to open the trigger's table.
    */
    thd->lex->reset_n_backup_query_tables_list(&backup);
    /*
      Restore Query_tables_list::sql_command, which was
      reset above, as the code that writes the query to the
      binary log assumes that this value corresponds to the
      statement that is being executed.
    */
    thd->lex->sql_command= backup.sql_command;

    if (add_table_for_trigger(thd, thd->lex->spname, if_exists, &tables))
      goto end;

    if (!tables)
    {
      DBUG_ASSERT(if_exists);
      /*
        Since the trigger does not exist, there is no associated table,
        and therefore:
        - no TRIGGER privileges to check,
        - no trigger to drop,
        - no table to lock/modify,
        so the drop statement is successful.
      */
      result= FALSE;
      /* Still, we need to log the query ... */
      stmt_query.append(thd->query(), thd->query_length());
      goto end;
    }
  }
  /*
    Check that the user has TRIGGER privilege on the subject table.
  */
  {
    bool err_status;
    TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
    thd->lex->query_tables_own_last= 0;

    err_status= check_table_access(thd, TRIGGER_ACL, tables, FALSE, 1, FALSE);
    thd->lex->query_tables_own_last= save_query_tables_own_last;

    if (err_status)
      goto end;
  }

  /* We should have only one table in table list. */
  DBUG_ASSERT(tables->next_global == 0);

  /* We do not allow creation of triggers on temporary tables. */
  if (create && find_temporary_table(thd, tables->db, tables->table_name))
  {
    my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias);
    goto end;
  }
  /* We also don't allow creation of triggers on views. */
  tables->required_type= FRMTYPE_TABLE;
  /*
    Also prevent DROP TRIGGER from opening a temporary table which might
    shadow the base table on which the trigger to be dropped is defined.
  */
  tables->open_type= OT_BASE_ONLY;
  /* Keep consistent with respect to other DDL statements */
  mysql_ha_rm_tables(thd, tables);
  if (thd->locked_tables_mode)
  {
    /* Under LOCK TABLES we must only accept write locked tables. */
    if (!(tables->table= find_table_for_mdl_upgrade(thd->open_tables,
                                                    tables->db,
                                                    tables->table_name,
                                                    FALSE)))
      goto end;
  }
  else
{
tables->table= open_n_lock_single_table(thd, tables,
TL_READ_NO_INSERT, 0);
if (! tables->table)
goto end;
tables->table->use_all_columns();
}
table= tables->table;
/* Later on we will need it to downgrade the lock */
mdl_ticket= table->mdl_ticket;
if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
goto end;
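  /*
    wait_while_table_is_used() has upgraded our shared metadata lock on the
    table to an exclusive one. Remember this so that the lock can be
    downgraded again on the way out if we are under LOCK TABLES.
  */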
lock_upgrade_done= TRUE;
if (!table->triggers)
{
if (!create)
{
my_error(ER_TRG_DOES_NOT_EXIST, MYF(0));
goto end;
}
if (!(table->triggers= new (&table->mem_root) Table_triggers_list(table)))
goto end;
}
mysql_mutex_lock(&LOCK_open);
result= (create ?
table->triggers->create_trigger(thd, tables, &stmt_query):
table->triggers->drop_trigger(thd, tables, &stmt_query));
mysql_mutex_unlock(&LOCK_open);
if (result)
goto end;
close_all_tables_for_name(thd, table->s, FALSE);
/*
Reopen the table if we were under LOCK TABLES.
    Ignore the return value for now. It's better to keep the master and
    slave in a consistent state.
*/
thd->locked_tables_list.reopen_tables(thd);
end:
if (!result)
{
result= write_bin_log(thd, TRUE, stmt_query.ptr(), stmt_query.length());
}
/*
    If we are under LOCK TABLES we should restore the original state of
    metadata locks. Otherwise all locks will be released along with the
    implicit commit.
*/
if (thd->locked_tables_mode && tables && lock_upgrade_done)
mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
/* Restore the query table list. Used only for drop trigger. */
if (!create)
thd->lex->restore_backup_query_tables_list(&backup);
Implement new type-of-operation-aware metadata locks. Add a wait-for graph based deadlock detector to the MDL subsystem. Fixes bug #46272 "MySQL 5.4.4, new MDL: unnecessary deadlock" and bug #37346 "innodb does not detect deadlock between update and alter table". The first bug manifested itself as an unwarranted abort of a transaction with ER_LOCK_DEADLOCK error by a concurrent ALTER statement, when this transaction tried to repeat use of a table, which it has already used in a similar fashion before ALTER started. The second bug showed up as a deadlock between table-level locks and InnoDB row locks, which was "detected" only after innodb_lock_wait_timeout timeout. A transaction would start using the table and modify a few rows. Then ALTER TABLE would come in, and start copying rows into a temporary table. Eventually it would stumble on the modified records and get blocked on a row lock. The first transaction would try to do more updates, and get blocked on thr_lock.c lock. This situation of circular wait would only get resolved by a timeout. Both these bugs stemmed from inadequate solutions to the problem of deadlocks occurring between different locking subsystems. In the first case we tried to avoid deadlocks between metadata locking and table-level locking subsystems, when upgrading shared metadata lock to exclusive one. Transactions holding the shared lock on the table and waiting for some table-level lock used to be aborted too aggressively. We also allowed ALTER TABLE to start in presence of transactions that modify the subject table. ALTER TABLE acquires TL_WRITE_ALLOW_READ lock at start, and that block all writes against the table (naturally, we don't want any writes to be lost when switching the old and the new table). TL_WRITE_ALLOW_READ lock, in turn, would block the started transaction on thr_lock.c lock, should they do more updates. This, again, lead to the need to abort such transactions. The second bug occurred simply because we didn't have any mechanism to detect deadlocks between the table-level locks in thr_lock.c and row-level locks in InnoDB, other than innodb_lock_wait_timeout. This patch solves both these problems by moving lock conflicts which are causing these deadlocks into the metadata locking subsystem, thus making it possible to avoid or detect such deadlocks inside MDL. To do this we introduce new type-of-operation-aware metadata locks, which allow MDL subsystem to know not only the fact that transaction has used or is going to use some object but also what kind of operation it has carried out or going to carry out on the object. This, along with the addition of a special kind of upgradable metadata lock, allows ALTER TABLE to wait until all transactions which has updated the table to go away. This solves the second issue. Another special type of upgradable metadata lock is acquired by LOCK TABLE WRITE. This second lock type allows to solve the first issue, since abortion of table-level locks in event of DDL under LOCK TABLES becomes also unnecessary. Below follows the list of incompatible changes introduced by this patch: - From now on, ALTER TABLE and CREATE/DROP TRIGGER SQL (i.e. those statements that acquire TL_WRITE_ALLOW_READ lock) wait for all transactions which has *updated* the table to complete. - From now on, LOCK TABLES ... WRITE, REPAIR/OPTIMIZE TABLE (i.e. all statements which acquire TL_WRITE table-level lock) wait for all transaction which *updated or read* from the table to complete. 
As a consequence, innodb_table_locks=0 option no longer applies to LOCK TABLES ... WRITE. - DROP DATABASE, DROP TABLE, RENAME TABLE no longer abort statements or transactions which use tables being dropped or renamed, and instead wait for these transactions to complete. - Since LOCK TABLES WRITE now takes a special metadata lock, not compatible with with reads or writes against the subject table and transaction-wide, thr_lock.c deadlock avoidance algorithm that used to ensure absence of deadlocks between LOCK TABLES WRITE and other statements is no longer sufficient, even for MyISAM. The wait-for graph based deadlock detector of MDL subsystem may sometimes be necessary and is involved. This may lead to ER_LOCK_DEADLOCK error produced for multi-statement transactions even if these only use MyISAM: session 1: session 2: begin; update t1 ... lock table t2 write, t1 write; -- gets a lock on t2, blocks on t1 update t2 ... (ER_LOCK_DEADLOCK) - Finally, support of LOW_PRIORITY option for LOCK TABLES ... WRITE was abandoned. LOCK TABLE ... LOW_PRIORITY WRITE from now on has the same priority as the usual LOCK TABLE ... WRITE. SELECT HIGH PRIORITY no longer trumps LOCK TABLE ... WRITE in the wait queue. - We do not take upgradable metadata locks on implicitly locked tables. So if one has, say, a view v1 that uses table t1, and issues: LOCK TABLE v1 WRITE; FLUSH TABLE t1; -- (or just 'FLUSH TABLES'), an error is produced. In order to be able to perform DDL on a table under LOCK TABLES, the table must be locked explicitly in the LOCK TABLES list.
2010-02-01 12:43:06 +01:00
if (thd->global_read_lock.has_protection())
thd->global_read_lock.start_waiting_global_read_lock(thd);
if (!result)
my_ok(thd);
DBUG_RETURN(result);
}
/**
  Create a trigger for a table.
@param thd current thread context (including trigger definition in
LEX)
@param tables table list containing one open table for which the
trigger is created.
  @param[out] stmt_query after successful return, this string contains a
              well-formed statement for creating this trigger.
@note
- Assumes that trigger name is fully qualified.
- NULL-string means the following LEX_STRING instance:
{ str = 0; length = 0 }.
  - In other words, definer_user and definer_host should both be
    NULL-strings (non-SUID/old trigger) or both be valid strings
    (SUID/new trigger).
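
  @note
    An illustrative example of a statement handled by this function
    (object names are hypothetical):
    @code
    CREATE DEFINER = 'bob'@'localhost' TRIGGER trg_t1_ai
      AFTER INSERT ON t1 FOR EACH ROW
      INSERT INTO audit_log VALUES (NEW.id, NOW());
    @endcode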
@retval
False success
@retval
True error
*/
bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
String *stmt_query)
{
LEX *lex= thd->lex;
TABLE *table= tables->table;
char file_buff[FN_REFLEN], trigname_buff[FN_REFLEN];
LEX_STRING file, trigname_file;
LEX_STRING *trg_def;
LEX_STRING definer_user;
LEX_STRING definer_host;
ulonglong *trg_sql_mode;
char trg_definer_holder[USER_HOST_BUFF_SIZE];
LEX_STRING *trg_definer;
Item_trigger_field *trg_field;
struct st_trigname trigname;
LEX_STRING *trg_client_cs_name;
LEX_STRING *trg_connection_cl_name;
LEX_STRING *trg_db_cl_name;
/* Trigger must be in the same schema as target table. */
if (my_strcasecmp(table_alias_charset, table->s->db.str,
lex->spname->m_db.str))
{
my_error(ER_TRG_IN_WRONG_SCHEMA, MYF(0));
return 1;
}
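
  /*
    For example (names are illustrative), the following fails with
    ER_TRG_IN_WRONG_SCHEMA because the trigger and its subject table are
    in different schemas:

      CREATE TRIGGER db2.trg1 BEFORE INSERT ON db1.t1 FOR EACH ROW
        SET @a:= 1;
  */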
/* We don't allow creation of several triggers of the same type yet */
if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time] != 0)
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
"multiple triggers with the same action time"
" and event for one table");
return 1;
}
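
  /*
    For example (names are illustrative), the second statement below fails
    with ER_NOT_SUPPORTED_YET because both triggers have the same action
    time and event on the same table:

      CREATE TRIGGER trg1 BEFORE INSERT ON t1 FOR EACH ROW SET @a:= 1;
      CREATE TRIGGER trg2 BEFORE INSERT ON t1 FOR EACH ROW SET @a:= 2;
  */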
if (!lex->definer)
{
/*
DEFINER-clause is missing.
      If we are in a slave thread, this means that we received CREATE
      TRIGGER from a master that does not support DEFINER in triggers, so
      we should mark this trigger as non-SUID. Note that this does not
      happen when we parse trigger definitions while opening the .TRG
      file; LEX::definer is ignored in that case.

      Otherwise, we should use CURRENT_USER() as the definer.

      NOTE: when the CREATE TRIGGER statement is allowed to be executed in
      PS/SP, it will be necessary to create the definer below in the
      persistent MEM_ROOT of the PS/SP.
*/
if (!thd->slave_thread)
{
if (!(lex->definer= create_default_definer(thd)))
return 1;
}
}
/*
If the specified definer differs from the current user, we should check
    that the current user has the SUPER privilege (in order to create a
    trigger under another user one must have the SUPER privilege).
*/
if (lex->definer &&
(strcmp(lex->definer->user.str, thd->security_ctx->priv_user) ||
my_strcasecmp(system_charset_info,
lex->definer->host.str,
thd->security_ctx->priv_host)))
{
if (check_global_access(thd, SUPER_ACL))
{
my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER");
return TRUE;
}
}
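
  /*
    For example (accounts are illustrative), if the current user is
    'alice'@'%', then

      CREATE DEFINER = 'bob'@'%' TRIGGER ...

    requires the SUPER privilege, whereas DEFINER = CURRENT_USER does not.
  */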
/*
    Let us check that all references to fields in the OLD/NEW versions of
    the row in this trigger are ok.

    NOTE: We do it here mostly for ease of use. We still have to do some
    checks on each execution. E.g. we can catch privilege changes only
    during execution. Also, in the near future, when we allow access to
    other tables from triggers, we won't be able to catch changes in those
    tables here...

    Since we don't plan to access the contents of the fields, it does not
    matter that we choose the same versions of Field objects for both the
    OLD and NEW values here.
*/
old_field= new_field= table->field;
for (trg_field= lex->trg_table_fields.first;
trg_field; trg_field= trg_field->next_trg_field)
{
/*
NOTE: now we do not check privileges at CREATE TRIGGER time. This will
be changed in the future.
*/
trg_field->setup_field(thd, table, NULL);
if (!trg_field->fixed &&
trg_field->fix_fields(thd, (Item **)0))
return 1;
}
/*
    Here we create the file that stores the triggers and save all trigger
    definitions into it. sql_create_definition_file() handles renaming and
    backup of older versions.
*/
file.length= build_table_filename(file_buff, FN_REFLEN - 1,
tables->db, tables->table_name,
TRG_EXT, 0);
file.str= file_buff;
trigname_file.length= build_table_filename(trigname_buff, FN_REFLEN-1,
tables->db,
lex->spname->m_name.str,
TRN_EXT, 0);
trigname_file.str= trigname_buff;
/* Use the filesystem to enforce trigger namespace constraints. */
if (!access(trigname_buff, F_OK))
{
my_error(ER_TRG_ALREADY_EXISTS, MYF(0));
return 1;
}
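
  /*
    Sketch of the on-disk layout assumed here (paths are illustrative):

      <datadir>/<db>/<table_name>.TRG     -- all triggers of one table
      <datadir>/<db>/<trigger_name>.TRN   -- maps a trigger name to its table

    Trigger names are unique per schema, so an existing .TRN file means a
    trigger with this name already exists.
  */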
trigname.trigger_table.str= tables->table_name;
trigname.trigger_table.length= tables->table_name_length;
if (sql_create_definition_file(NULL, &trigname_file, &trigname_file_type,
(uchar*)&trigname, trigname_file_parameters))
return 1;
/*
    Soon we will invalidate the table object, and with it the
    Table_triggers_list object, so we don't care where trg_def->ptr points
    or about other invariants (e.g. we don't bother to update names_list).

    QQ: Hmm... probably we should not care about setting up the active
    thread's mem_root either.
*/
if (!(trg_def= alloc_lex_string(&table->mem_root)) ||
definitions_list.push_back(trg_def, &table->mem_root) ||
!(trg_sql_mode= alloc_type<ulonglong>(&table->mem_root)) ||
definition_modes_list.push_back(trg_sql_mode, &table->mem_root) ||
!(trg_definer= alloc_lex_string(&table->mem_root)) ||
definers_list.push_back(trg_definer, &table->mem_root) ||
!(trg_client_cs_name= alloc_lex_string(&table->mem_root)) ||
client_cs_names.push_back(trg_client_cs_name, &table->mem_root) ||
!(trg_connection_cl_name= alloc_lex_string(&table->mem_root)) ||
connection_cl_names.push_back(trg_connection_cl_name, &table->mem_root) ||
!(trg_db_cl_name= alloc_lex_string(&table->mem_root)) ||
db_cl_names.push_back(trg_db_cl_name, &table->mem_root))
{
goto err_with_cleanup;
}
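/*
Remember the session sql_mode: the trigger body will later be re-parsed
under this exact mode when it is loaded from the .TRG file.
*/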
*trg_sql_mode= thd->variables.sql_mode;
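/*
If the DEFINER refers to a user that does not exist, only a note-level
warning is pushed; trigger creation itself still proceeds.
*/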
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (lex->definer && !is_acl_user(lex->definer->host.str,
lex->definer->user.str))
{
push_warning_printf(thd,
MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_NO_SUCH_USER,
ER(ER_NO_SUCH_USER),
lex->definer->user.str,
lex->definer->host.str);
}
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
if (lex->definer)
{
/* SUID trigger. */
definer_user= lex->definer->user;
definer_host= lex->definer->host;
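/* Render the definer as "user@host" into the caller-provided holder. */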
trg_definer->str= trg_definer_holder;
trg_definer->length= strxmov(trg_definer->str, definer_user.str, "@",
definer_host.str, NullS) - trg_definer->str;
}
else
{
/* non-SUID trigger. */
definer_user.str= 0;
definer_user.length= 0;
definer_host.str= 0;
definer_host.length= 0;
trg_definer->str= (char*) "";
trg_definer->length= 0;
}
/*
Fill character set information:
- client character set contains charset info only;
- connection collation contains pair {character set, collation};
- database collation contains pair {character set, collation};
*/
lex_string_set(trg_client_cs_name, thd->charset()->csname);
lex_string_set(trg_connection_cl_name,
thd->variables.collation_connection->name);
lex_string_set(trg_db_cl_name,
get_default_db_collation(thd, tables->db)->name);
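/*
Character set and collation names (not numeric ids) are what end up in
the .TRG file; e.g. (hypothetical values) client_cs_name= "latin1",
connection_cl_name= "latin1_swedish_ci", db_cl_name= "utf8_general_ci".
*/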
/*
Create a well-formed trigger definition query. The original query text
is not suitable as is, because its definer clause may be absent or not
in canonical form.
*/
stmt_query->append(STRING_WITH_LEN("CREATE "));
if (trg_definer)
{
/*
Append definer-clause if the trigger is SUID (a usual trigger in
new MySQL versions).
*/
append_definer(thd, stmt_query, &definer_user, &definer_host);
}
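/*
The parser recorded where the trigger definition proper begins and ends
(stmt_definition_begin/end), i.e. the statement text following the
definer clause; it is trimmed and appended below.
*/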
LEX_STRING stmt_definition;
stmt_definition.str= (char*) thd->lex->stmt_definition_begin;
stmt_definition.length= thd->lex->stmt_definition_end
- thd->lex->stmt_definition_begin;
trim_whitespace(thd->charset(), & stmt_definition);
stmt_query->append(stmt_definition.str, stmt_definition.length);
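/*
At this point stmt_query holds a complete definition, e.g. (hypothetical
trigger, shown for the SUID case):

CREATE DEFINER=`root`@`localhost` TRIGGER t1_bi BEFORE INSERT ON t1
FOR EACH ROW SET @a:= 1

This is the text that is written into the .TRG file below.
*/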
trg_def->str= stmt_query->c_ptr();
trg_def->length= stmt_query->length();
/* Create trigger definition file. */
if (!sql_create_definition_file(NULL, &file, &triggers_file_type,
(uchar*)this, triggers_file_parameters))
return 0;
err_with_cleanup:
mysql_file_delete(key_file_trn, trigname_buff, MYF(MY_WME));
return 1;
}
/**
Deletes the .TRG file for a table.

@param path        char buffer of size FN_REFLEN to be used
                   for constructing path to .TRG file.
@param db          table's database name
@param table_name  table's name

@retval
False success
@retval
True error
*/
static bool rm_trigger_file(char *path, const char *db,
const char *table_name)
{
build_table_filename(path, FN_REFLEN-1, db, table_name, TRG_EXT, 0);
return mysql_file_delete(key_file_trg, path, MYF(MY_WME));
}
/**
Deletes the .TRN file for a trigger.

@param path          char buffer of size FN_REFLEN to be used
                     for constructing path to .TRN file.
@param db            trigger's database name
@param trigger_name  trigger's name

@retval
False success
@retval
True error
*/
static bool rm_trigname_file(char *path, const char *db,
const char *trigger_name)
{
build_table_filename(path, FN_REFLEN - 1, db, trigger_name, TRN_EXT, 0);
return mysql_file_delete(key_file_trn, path, MYF(MY_WME));
}
/**
Helper function that saves .TRG file for Table_triggers_list object.

@param triggers    Table_triggers_list object for which file should be saved
@param db          Name of database for subject table
@param table_name  Name of subject table

@retval
FALSE Success
@retval
TRUE Error
*/
static bool save_trigger_file(Table_triggers_list *triggers, const char *db,
const char *table_name)
{
char file_buff[FN_REFLEN];
LEX_STRING file;
file.length= build_table_filename(file_buff, FN_REFLEN - 1, db, table_name,
TRG_EXT, 0);
file.str= file_buff;
return sql_create_definition_file(NULL, &file, &triggers_file_type,
(uchar*)triggers, triggers_file_parameters);
}
/**
Drop a trigger for a table.

@param thd              current thread context
                        (including the trigger definition in LEX)
@param tables           table list containing one open table for which the
                        trigger is dropped.
@param[out] stmt_query  after successful return, this string contains a
                        well-formed statement for dropping this trigger.

@todo
Probably instead of removing the .TRG file we should move it to an
archive directory, but this should be done as part of parse_file.cc
functionality (because we will need it elsewhere).

@retval
False success
@retval
True error
*/
bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables,
String *stmt_query)
{
const char *sp_name= thd->lex->spname->m_name.str; // alias
LEX_STRING *name;
char path[FN_REFLEN];
List_iterator_fast<LEX_STRING> it_name(names_list);
List_iterator<ulonglong> it_mod(definition_modes_list);
List_iterator<LEX_STRING> it_def(definitions_list);
List_iterator<LEX_STRING> it_definer(definers_list);
List_iterator<LEX_STRING> it_client_cs_name(client_cs_names);
List_iterator<LEX_STRING> it_connection_cl_name(connection_cl_names);
List_iterator<LEX_STRING> it_db_cl_name(db_cl_names);
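/*
All per-trigger lists are stored in the same order, so the iterators are
advanced in lock step below to keep one trigger's attributes aligned.
The original DROP TRIGGER text is appended to stmt_query so that the
caller can use it (e.g. for binary logging) on success.
*/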
stmt_query->append(thd->query(), thd->query_length());
while ((name= it_name++))
{
it_def++;
it_mod++;
it_definer++;
it_client_cs_name++;
it_connection_cl_name++;
it_db_cl_name++;
if (my_strcasecmp(table_alias_charset, sp_name, name->str) == 0)
{
/*
Again, we don't care much about the other cleanup required for a clean
trigger removal, since the table will be reopened anyway.
*/
it_def.remove();
it_mod.remove();
it_definer.remove();
it_client_cs_name.remove();
it_connection_cl_name.remove();
it_db_cl_name.remove();
if (definitions_list.is_empty())
{
/*
TODO: Probably instead of removing the .TRG file we should move it to
an archive directory, but this should be done as part of parse_file.cc
functionality (because we will need it elsewhere).
*/
if (rm_trigger_file(path, tables->db, tables->table_name))
return 1;
}
else
{
if (save_trigger_file(this, tables->db, tables->table_name))
return 1;
}
if (rm_trigname_file(path, tables->db, sp_name))
return 1;
return 0;
}
}
my_message(ER_TRG_DOES_NOT_EXIST, ER(ER_TRG_DOES_NOT_EXIST), MYF(0));
return 1;
}
Table_triggers_list::~Table_triggers_list()
{
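/*
Destroy the parsed trigger bodies (one sp_head per event/action pair)
and the Field objects created by prepare_record1_accessors(), if any.
*/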
for (int i= 0; i < (int)TRG_EVENT_MAX; i++)
for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
delete bodies[i][j];
if (record1_field)
for (Field **fld_ptr= record1_field; *fld_ptr; fld_ptr++)
delete *fld_ptr;
}
/**
Prepare an array of Field objects referencing TABLE::record[1] instead
of record[0] (they will represent OLD.* row values in ON UPDATE triggers
and in ON DELETE triggers, which are invoked during REPLACE execution).

@param table  pointer to TABLE object for which we are creating fields.

@retval
False success
@retval
True error
*/
bool Table_triggers_list::prepare_record1_accessors(TABLE *table)
{
Field **fld, **old_fld;
if (!(record1_field= (Field **)alloc_root(&table->mem_root,
(table->s->fields + 1) *
sizeof(Field*))))
return 1;
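/*
Clone each field of the subject table and shift the clone's data pointer
by the distance between record[1] and record[0], so the clones read the
OLD row image. The array is null-terminated.
*/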
for (fld= table->field, old_fld= record1_field; *fld; fld++, old_fld++)
{
/*
QQ: it is assumed that it is OK to use this function for field
cloning...
*/
if (!(*old_fld= (*fld)->new_field(&table->mem_root, table,
table == (*fld)->table)))
return 1;
(*old_fld)->move_field_offset((my_ptrdiff_t)(table->record[1] -
table->record[0]));
}
*old_fld= 0;
return 0;
}
/**
Adjust Table_triggers_list with new TABLE pointer.

@param new_table  new pointer to TABLE instance
*/
void Table_triggers_list::set_table(TABLE *new_table)
{
trigger_table= new_table;
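/*
Re-point the cloned OLD-row fields at the new TABLE instance so that
they keep working after the table has been reopened.
*/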
for (Field **field= new_table->triggers->record1_field ; *field ; field++)
{
(*field)->table= (*field)->orig_table= new_table;
(*field)->table_name= &new_table->alias;
}
}
/**
Check whether a .TRG file exists for the table and load all triggers it
contains.

@param thd         current thread context
@param db          table's database name
@param table_name  table's name
@param table       pointer to table object
@param names_only  stop after loading trigger names

@todo
A lot of things to do here, e.g. what about other functions and being
more paranoid?

@todo
This could be avoided if there are no triggers for UPDATE and DELETE.

@retval
False success
@retval
True error
*/
bool Table_triggers_list::check_n_load(THD *thd, const char *db,
const char *table_name, TABLE *table,
bool names_only)
{
char path_buff[FN_REFLEN];
LEX_STRING path;
File_parser *parser;
LEX_STRING save_db;
DBUG_ENTER("Table_triggers_list::check_n_load");
path.length= build_table_filename(path_buff, FN_REFLEN - 1,
db, table_name, TRG_EXT, 0);
path.str= path_buff;
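/* A missing .TRG file simply means the table has no triggers. */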
// QQ: should we analyze errno somehow?
if (access(path_buff, F_OK))
DBUG_RETURN(0);
/*
The file exists, so we have to load the triggers.
FIXME: A lot of things to do here, e.g. what about other functions and
being more paranoid?
*/
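/*
A .TRG file is a parse_file.cc definition file; schematically it looks
like this (contents shown here are illustrative, one list element per
trigger):

TYPE=TRIGGERS
triggers='CREATE DEFINER=`root`@`localhost` TRIGGER ...'
sql_modes=0
definers='root@localhost'
client_cs_names='latin1'
connection_cl_names='latin1_swedish_ci'
db_cl_names='latin1_swedish_ci'

Files written by old servers lack the later attributes; the code below
backfills them.
*/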
if ((parser= sql_parse_prepare(&path, &table->mem_root, 1)))
{
if (is_equal(&triggers_file_type, parser->type()))
{
Table_triggers_list *triggers=
new (&table->mem_root) Table_triggers_list(table);
Handle_old_incorrect_sql_modes_hook sql_modes_hook(path.str);
if (!triggers)
DBUG_RETURN(1);
/*
We don't have the following attributes in old versions of the .TRG file,
so we should initialize the lists for safety:
- sql_modes;
- definers;
- character sets (client, connection, database);
*/
triggers->definition_modes_list.empty();
triggers->definers_list.empty();
triggers->client_cs_names.empty();
triggers->connection_cl_names.empty();
triggers->db_cl_names.empty();
if (parser->parse((uchar*)triggers, &table->mem_root,
triggers_file_parameters,
TRG_NUM_REQUIRED_PARAMETERS,
&sql_modes_hook))
DBUG_RETURN(1);
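/*
parse() has now filled definitions_list and the companion lists from the
file. Older files may lack some attributes; they are backfilled below so
that all lists have one entry per trigger.
*/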
List_iterator_fast<LEX_STRING> it(triggers->definitions_list);
LEX_STRING *trg_create_str;
ulonglong *trg_sql_mode;
if (triggers->definition_modes_list.is_empty() &&
!triggers->definitions_list.is_empty())
{
/*
It is the old file format => we should fill the list of sql_modes.
We use one mode (the current one) for all triggers, because we have no
information about the mode in the old format.
*/
if (!(trg_sql_mode= alloc_type<ulonglong>(&table->mem_root)))
{
DBUG_RETURN(1); // EOM
}
*trg_sql_mode= global_system_variables.sql_mode;
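/* Push the same (current global) sql_mode entry for every trigger. */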
while (it++)
{
if (triggers->definition_modes_list.push_back(trg_sql_mode,
&table->mem_root))
{
DBUG_RETURN(1); // EOM
}
}
it.rewind();
}
if (triggers->definers_list.is_empty() &&
!triggers->definitions_list.is_empty())
{
/*
It is the old file format => we should fill the list of definers.
If there is no definer information, we should not switch context to the
definer when checking privileges, i.e. privileges for such triggers are
checked for the "invoker" rather than for the "definer".
*/
LEX_STRING *trg_definer;
if (!(trg_definer= alloc_lex_string(&table->mem_root)))
DBUG_RETURN(1); // EOM
trg_definer->str= (char*) "";
trg_definer->length= 0;
while (it++)
{
if (triggers->definers_list.push_back(trg_definer,
&table->mem_root))
{
DBUG_RETURN(1); // EOM
}
}
it.rewind();
}
if (!triggers->definitions_list.is_empty() &&
(triggers->client_cs_names.is_empty() ||
triggers->connection_cl_names.is_empty() ||
triggers->db_cl_names.is_empty()))
{
    /*
      This is the old file format => fill in the character set lists
      ourselves.
    */
LEX_STRING *trg_client_cs_name;
LEX_STRING *trg_connection_cl_name;
LEX_STRING *trg_db_cl_name;
    /*
      Either all three character set lists are filled or none of them is:
      a partially filled context means the .TRG file is corrupted.
    */
    if (!triggers->client_cs_names.is_empty() ||
        !triggers->connection_cl_names.is_empty() ||
        !triggers->db_cl_names.is_empty())
    {
      my_error(ER_TRG_CORRUPTED_FILE, MYF(0),
               (const char *) db,
               (const char *) table_name);

      DBUG_RETURN(1); // corrupted file
    }
if (!thd->no_warnings_for_error)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRG_NO_CREATION_CTX,
ER(ER_TRG_NO_CREATION_CTX),
(const char*) db,
(const char*) table_name);
if (!(trg_client_cs_name= alloc_lex_string(&table->mem_root)) ||
!(trg_connection_cl_name= alloc_lex_string(&table->mem_root)) ||
!(trg_db_cl_name= alloc_lex_string(&table->mem_root)))
{
DBUG_RETURN(1); // EOM
}
/*
Backward compatibility: assume that the query is in the current
character set.
*/
lex_string_set(trg_client_cs_name,
thd->variables.character_set_client->csname);
lex_string_set(trg_connection_cl_name,
thd->variables.collation_connection->name);
lex_string_set(trg_db_cl_name,
thd->variables.collation_database->name);
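    /*
      As with the definers above: push the same three strings once per
      trigger definition, so that client_cs_names, connection_cl_names and
      db_cl_names each get one element per entry in definitions_list.
    */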
while (it++)
{
if (triggers->client_cs_names.push_back(trg_client_cs_name,
&table->mem_root) ||
triggers->connection_cl_names.push_back(trg_connection_cl_name,
&table->mem_root) ||
triggers->db_cl_names.push_back(trg_db_cl_name,
&table->mem_root))
{
DBUG_RETURN(1); // EOM
}
}
it.rewind();
}
DBUG_ASSERT(triggers->definition_modes_list.elements ==
triggers->definitions_list.elements);
DBUG_ASSERT(triggers->definers_list.elements ==
triggers->definitions_list.elements);
DBUG_ASSERT(triggers->client_cs_names.elements ==
triggers->definitions_list.elements);
DBUG_ASSERT(triggers->connection_cl_names.elements ==
triggers->definitions_list.elements);
DBUG_ASSERT(triggers->db_cl_names.elements ==
triggers->definitions_list.elements);
table->triggers= triggers;
  /*
    TODO: This could be avoided if there are no triggers for UPDATE and
    DELETE.
  */
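  /*
    The record[1] accessors are Field object copies that point into the
    record[1] buffer, which is where OLD row values live for UPDATE and
    DELETE triggers.
  */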
if (!names_only && triggers->prepare_record1_accessors(table))
DBUG_RETURN(1);
List_iterator_fast<ulonglong> itm(triggers->definition_modes_list);
List_iterator_fast<LEX_STRING> it_definer(triggers->definers_list);
List_iterator_fast<LEX_STRING> it_client_cs_name(triggers->client_cs_names);
List_iterator_fast<LEX_STRING> it_connection_cl_name(triggers->connection_cl_names);
List_iterator_fast<LEX_STRING> it_db_cl_name(triggers->db_cl_names);
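  /*
    These iterators walk the per-trigger lists in lockstep with
    definitions_list; the asserts above guarantee that all lists have one
    element per trigger.
  */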
LEX *old_lex= thd->lex, lex;
sp_rcontext *save_spcont= thd->spcont;
ulong save_sql_mode= thd->variables.sql_mode;
LEX_STRING *on_table_name;
thd->lex= &lex;
save_db.str= thd->db;
save_db.length= thd->db_length;
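  /*
    Temporarily switch the current database to the one the triggers belong
    to, so that unqualified names in the trigger bodies are resolved
    against it during parsing; the original database is restored before
    returning.
  */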
thd->reset_db((char*) db, strlen(db));
while ((trg_create_str= it++))
{
sp_head *sp;
trg_sql_mode= itm++;
LEX_STRING *trg_definer= it_definer++;
thd->variables.sql_mode= (ulong)*trg_sql_mode;
Parser_state parser_state;
if (parser_state.init(thd, trg_create_str->str, trg_create_str->length))
goto err_with_lex_cleanup;
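    /*
      Re-create the environment that was in effect when the trigger was
      created: client character set, connection collation and database
      collation, as stored in the .TRG file (or backfilled above for
      old-format files).
    */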
Trigger_creation_ctx *creation_ctx=
Trigger_creation_ctx::create(thd,
db,
table_name,
it_client_cs_name++,
it_connection_cl_name++,
it_db_cl_name++);
lex_start(thd);
thd->spcont= NULL;
    if (parse_sql(thd, &parser_state, creation_ctx))
{
/* Currently sphead is always deleted in case of a parse error */
DBUG_ASSERT(lex.sphead == 0);
goto err_with_lex_cleanup;
}
    /*
      Not strictly necessary to invoke this method here, since we know
      that we've parsed CREATE TRIGGER and not an
      UPDATE/DELETE/INSERT/REPLACE/LOAD/CREATE TABLE, but we try to
      maintain the invariant that this method is called for each
      distinct statement, in case its logic is extended with other
      types of analyses in the future.
    */
lex.set_trg_event_type_for_tables();
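    /*
      bodies[][] is indexed by trigger event (INSERT/UPDATE/DELETE) and
      action time (BEFORE/AFTER); each slot holds at most one trigger
      body.
    */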
int event= lex.trg_chistics.event;
int action_time= lex.trg_chistics.action_time;
sp= triggers->bodies[event][action_time]= lex.sphead;
lex.sphead= NULL; /* Prevent double cleanup. */
sp->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode);
sp->set_creation_ctx(creation_ctx);
if (!trg_definer->length)
{
      /*
        This trigger was created/imported by an older version of MySQL
        that did not support trigger definers, so emit a warning here.
      */
if (!thd->no_warnings_for_error)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRG_NO_DEFINER, ER(ER_TRG_NO_DEFINER),
(const char*) db,
(const char*) sp->m_name.str);
      /*
        Set the definer to '' so that it is displayed correctly in
        INFORMATION_SCHEMA.
      */
sp->set_definer((char*) "", 0);
/*
Triggers without definer information are executed under the
authorization of the invoker.
*/
sp->m_chistics->suid= SP_IS_NOT_SUID;
}
else
sp->set_definer(trg_definer->str, trg_definer->length);
if (triggers->names_list.push_back(&sp->m_name, &table->mem_root))
goto err_with_lex_cleanup;
if (!(on_table_name= alloc_lex_string(&table->mem_root)))
goto err_with_lex_cleanup;
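    /*
      Remember where the raw "ON <table name>" part of the definition is
      located, so that the stored definition can be updated when the table
      is renamed. E.g. in the hypothetical definition

        CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW SET @a:=1

      the saved span is the ON clause ("ON t1").
    */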
on_table_name->str= (char*) lex.raw_trg_on_table_name_begin;
on_table_name->length= lex.raw_trg_on_table_name_end
- lex.raw_trg_on_table_name_begin;
if (triggers->on_table_names_list.push_back(on_table_name, &table->mem_root))
goto err_with_lex_cleanup;
#ifndef DBUG_OFF
    /*
      Let us check that we correctly update trigger definitions when we
      rename tables with triggers.

      In special cases like "RENAME TABLE `#mysql50#somename` TO `somename`"
      or "ALTER DATABASE `#mysql50#somename` UPGRADE DATA DIRECTORY NAME"
      we might be given a table or database name with the "#mysql50#"
      prefix (while the trigger's definition contains the un-prefixed
      version of the same name). To remove this prefix we use
      check_n_cut_mysql50_prefix().
    */
char fname[NAME_LEN + 1];
DBUG_ASSERT((!my_strcasecmp(table_alias_charset, lex.query_tables->db, db) ||
(check_n_cut_mysql50_prefix(db, fname, sizeof(fname)) &&
!my_strcasecmp(table_alias_charset, lex.query_tables->db, fname))) &&
(!my_strcasecmp(table_alias_charset, lex.query_tables->table_name,
table_name) ||
(check_n_cut_mysql50_prefix(table_name, fname, sizeof(fname)) &&
!my_strcasecmp(table_alias_charset, lex.query_tables->table_name, fname))));
#endif
if (names_only)
{
lex_end(&lex);
continue;
}
    /*
      Gather all Item_trigger_field objects, which represent access to
      fields of the old/new row versions from within the trigger, into
      per-(event, action time) lists covering all triggers with the same
      action and timing.
    */
triggers->trigger_fields[lex.trg_chistics.event]
[lex.trg_chistics.action_time]=
lex.trg_table_fields.first;
    /*
      Also bind these objects to the Field objects of the table being
      opened.

      We ignore errors here, because even if something is wrong we still
      want to be able to open the table to perform some operations
      (e.g. SELECT). Some things can be checked only during trigger
      execution anyway.
    */
for (Item_trigger_field *trg_field= lex.trg_table_fields.first;
trg_field;
trg_field= trg_field->next_trg_field)
{
trg_field->setup_field(thd, table,
&triggers->subject_table_grants[lex.trg_chistics.event]
[lex.trg_chistics.action_time]);
}
lex_end(&lex);
}
thd->reset_db(save_db.str, save_db.length);
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
DBUG_RETURN(0);
err_with_lex_cleanup:
// QQ: anything else ?
lex_end(&lex);
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
thd->reset_db(save_db.str, save_db.length);
DBUG_RETURN(1);
}
/*
We don't care about this error message much because .TRG files will
be merged into .FRM anyway.
*/
    my_error(ER_WRONG_OBJECT, MYF(0),
             table_name, TRG_EXT + 1, "TRIGGER"); // TRG_EXT + 1 skips the '.'
DBUG_RETURN(1);
}
DBUG_RETURN(1);
}
/**
  Obtains and returns trigger metadata.

  @param thd                current thread context
  @param event              trigger event type
  @param time_type          trigger action time
  @param trigger_name       returns name of trigger
  @param trigger_stmt       returns statement of trigger
  @param sql_mode           returns sql_mode of trigger
  @param definer            returns definer/creator of trigger. The caller
                            is responsible for allocating enough space to
                            store the definer information.
  @param client_cs_name     returns client character set name of the
                            trigger creation context
  @param connection_cl_name returns connection collation name of the
                            trigger creation context
  @param db_cl_name         returns database collation name of the
                            trigger creation context

  @retval FALSE success
  @retval TRUE  error
*/
bool Table_triggers_list::get_trigger_info(THD *thd, trg_event_type event,
trg_action_time_type time_type,
LEX_STRING *trigger_name,
LEX_STRING *trigger_stmt,
ulong *sql_mode,
LEX_STRING *definer,
LEX_STRING *client_cs_name,
LEX_STRING *connection_cl_name,
LEX_STRING *db_cl_name)
{
sp_head *body;
DBUG_ENTER("get_trigger_info");
if ((body= bodies[event][time_type]))
{
Stored_program_creation_ctx *creation_ctx=
bodies[event][time_type]->get_creation_ctx();
*trigger_name= body->m_name;
*trigger_stmt= body->m_body_utf8;
*sql_mode= body->m_sql_mode;
if (body->m_chistics->suid == SP_IS_NOT_SUID)
{
definer->str[0]= 0;
definer->length= 0;
}
else
{
definer->length= strxmov(definer->str, body->m_definer_user.str, "@",
body->m_definer_host.str, NullS) - definer->str;
}
lex_string_set(client_cs_name,
creation_ctx->get_client_cs()->csname);
lex_string_set(connection_cl_name,
creation_ctx->get_connection_cl()->name);
lex_string_set(db_cl_name,
creation_ctx->get_db_cl()->name);
DBUG_RETURN(0);
}
DBUG_RETURN(1);
}
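
/*
  Illustrative usage sketch, not part of the original source: the exact
  leading parameters of get_trigger_info() are assumed here, and `table`
  and `definer_buff` are hypothetical. It shows how a caller could fetch
  metadata of the BEFORE INSERT trigger of an opened table.

    char definer_buff[256];
    LEX_STRING name, stmt, definer= { definer_buff, 0 };
    LEX_STRING client_cs, conn_cl, db_cl;
    ulonglong sql_mode;

    if (!table->triggers->get_trigger_info(thd, TRG_EVENT_INSERT,
                                           TRG_ACTION_BEFORE,
                                           &name, &stmt, &sql_mode,
                                           &definer, &client_cs,
                                           &conn_cl, &db_cl))
    {
      /* Trigger exists: `stmt` holds the UTF8 body (m_body_utf8). */
    }
*/
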
void Table_triggers_list::get_trigger_info(THD *thd,
int trigger_idx,
LEX_STRING *trigger_name,
ulonglong *sql_mode,
LEX_STRING *sql_original_stmt,
LEX_STRING *client_cs_name,
LEX_STRING *connection_cl_name,
LEX_STRING *db_cl_name)
{
List_iterator_fast<LEX_STRING> it_trigger_name(names_list);
List_iterator_fast<ulonglong> it_sql_mode(definition_modes_list);
List_iterator_fast<LEX_STRING> it_sql_orig_stmt(definitions_list);
List_iterator_fast<LEX_STRING> it_client_cs_name(client_cs_names);
List_iterator_fast<LEX_STRING> it_connection_cl_name(connection_cl_names);
List_iterator_fast<LEX_STRING> it_db_cl_name(db_cl_names);
for (int i = 0; i < trigger_idx; ++i)
{
it_trigger_name.next_fast();
it_sql_mode.next_fast();
it_sql_orig_stmt.next_fast();
it_client_cs_name.next_fast();
it_connection_cl_name.next_fast();
it_db_cl_name.next_fast();
}
*trigger_name= *(it_trigger_name++);
*sql_mode= *(it_sql_mode++);
*sql_original_stmt= *(it_sql_orig_stmt++);
*client_cs_name= *(it_client_cs_name++);
*connection_cl_name= *(it_connection_cl_name++);
*db_cl_name= *(it_db_cl_name++);
}
int Table_triggers_list::find_trigger_by_name(const LEX_STRING *trg_name)
{
List_iterator_fast<LEX_STRING> it(names_list);
for (int i = 0; ; ++i)
{
LEX_STRING *cur_name= it++;
if (!cur_name)
return -1;
if (strcmp(cur_name->str, trg_name->str) == 0)
return i;
}
}
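
/*
  Illustrative sketch, not part of the original source: resolving a
  trigger's ordinal position before reading its metadata by index;
  `table` is a hypothetical opened TABLE with triggers loaded.

    LEX_STRING trg= { C_STRING_WITH_LEN("t1_bi") };
    int idx= table->triggers->find_trigger_by_name(&trg);
    if (idx >= 0)
    {
      /*
        names_list, definitions_list etc. are parallel lists, so the
        same index can be passed to get_trigger_info(thd, idx, ...).
      */
    }
*/
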
/**
  Find trigger's table from trigger identifier and add it to
  the statement table list.

  @param[in]  thd       Thread context.
  @param[in]  trg_name  Trigger name.
  @param[in]  if_exists TRUE if SQL statement contains "IF EXISTS" clause.
                        That means a warning instead of an error should
                        be raised if a trigger with the given name does
                        not exist.
  @param[out] table     Pointer to TABLE_LIST object for the trigger's
                        table.

  @return Operation status
  @retval FALSE On success.
  @retval TRUE  Otherwise.
*/
bool add_table_for_trigger(THD *thd,
const sp_name *trg_name,
bool if_exists,
TABLE_LIST **table)
{
LEX *lex= thd->lex;
char trn_path_buff[FN_REFLEN];
LEX_STRING trn_path= { trn_path_buff, 0 };
LEX_STRING tbl_name;
DBUG_ENTER("add_table_for_trigger");
build_trn_path(thd, trg_name, &trn_path);
if (check_trn_exists(&trn_path))
{
if (if_exists)
{
push_warning_printf(thd,
MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_TRG_DOES_NOT_EXIST,
ER(ER_TRG_DOES_NOT_EXIST));
*table= NULL;
DBUG_RETURN(FALSE);
}
my_error(ER_TRG_DOES_NOT_EXIST, MYF(0));
DBUG_RETURN(TRUE);
}
if (load_table_name_for_trigger(thd, trg_name, &trn_path, &tbl_name))
DBUG_RETURN(TRUE);
*table= sp_add_to_query_tables(thd, lex, trg_name->m_db.str,
tbl_name.str, TL_IGNORE,
MDL_SHARED_NO_WRITE);
DBUG_RETURN(*table ? FALSE : TRUE);
}
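
/*
  Illustrative sketch of a hypothetical caller, not from this file: the
  DROP TRIGGER code path resolves the subject table from the .TRN file
  before opening and locking it.

    TABLE_LIST *tables= NULL;
    if (add_table_for_trigger(thd, thd->lex->spname,
                              thd->lex->drop_if_exists, &tables))
      return TRUE;          /* error has already been reported */
    if (tables == NULL)
      return FALSE;         /* IF EXISTS: note pushed, nothing to drop */
*/
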
/**
Drop all triggers for table.
@param thd current thread context
@param db schema for table
@param name name for table
@note
    The calling thread should hold the LOCK_open mutex.
  @retval
    FALSE Success
  @retval
    TRUE  Error
*/
bool Table_triggers_list::drop_all_triggers(THD *thd, char *db, char *name)
{
TABLE table;
char path[FN_REFLEN];
bool result= 0;
DBUG_ENTER("drop_all_triggers");
bzero(&table, sizeof(table));
init_sql_alloc(&table.mem_root, 8192, 0);
if (Table_triggers_list::check_n_load(thd, db, name, &table, 1))
{
result= 1;
goto end;
}
if (table.triggers)
{
LEX_STRING *trigger;
List_iterator_fast<LEX_STRING> it_name(table.triggers->names_list);
while ((trigger= it_name++))
{
if (rm_trigname_file(path, db, trigger->str))
{
        /*
          Instead of immediately bailing out with an error if we were
          unable to remove a .TRN file, we try to drop the remaining
          files as well.
        */
result= 1;
continue;
}
}
if (rm_trigger_file(path, db, name))
{
result= 1;
goto end;
}
}
end:
if (table.triggers)
delete table.triggers;
free_root(&table.mem_root, MYF(0));
DBUG_RETURN(result);
}
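
/*
  Illustrative note, summarizing the code above: for a table t1 in
  schema db1 with triggers t1_bi and t1_bd, this removes db1/t1_bi.TRN
  and db1/t1_bd.TRN and then db1/t1.TRG. Failure to remove one .TRN
  file is remembered in `result` but does not stop removal of the
  remaining files.
*/
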
/**
Update .TRG file after renaming triggers' subject table
(change name of table in triggers' definitions).
@param thd Thread context
@param old_db_name Old database of subject table
@param new_db_name New database of subject table
@param old_table_name Old subject table's name
@param new_table_name New subject table's name
@retval
FALSE Success
@retval
TRUE Failure
*/
bool
Table_triggers_list::change_table_name_in_triggers(THD *thd,
const char *old_db_name,
const char *new_db_name,
LEX_STRING *old_table_name,
LEX_STRING *new_table_name)
{
char path_buff[FN_REFLEN];
LEX_STRING *def, *on_table_name, new_def;
ulong save_sql_mode= thd->variables.sql_mode;
List_iterator_fast<LEX_STRING> it_def(definitions_list);
List_iterator_fast<LEX_STRING> it_on_table_name(on_table_names_list);
List_iterator_fast<ulonglong> it_mode(definition_modes_list);
size_t on_q_table_name_len, before_on_len;
String buff;
DBUG_ASSERT(definitions_list.elements == on_table_names_list.elements &&
definitions_list.elements == definition_modes_list.elements);
while ((def= it_def++))
{
on_table_name= it_on_table_name++;
thd->variables.sql_mode= (ulong) *(it_mode++);
/* Construct CREATE TRIGGER statement with new table name. */
buff.length(0);
/* WARNING: 'on_table_name' is supposed to point inside 'def' */
DBUG_ASSERT(on_table_name->str > def->str);
DBUG_ASSERT(on_table_name->str < (def->str + def->length));
before_on_len= on_table_name->str - def->str;
buff.append(def->str, before_on_len);
buff.append(STRING_WITH_LEN("ON "));
append_identifier(thd, &buff, new_table_name->str, new_table_name->length);
buff.append(STRING_WITH_LEN(" "));
on_q_table_name_len= buff.length() - before_on_len;
buff.append(on_table_name->str + on_table_name->length,
def->length - (before_on_len + on_table_name->length));
/*
It is OK to allocate some memory on table's MEM_ROOT since this
table instance will be thrown out at the end of rename anyway.
*/
new_def.str= (char*) memdup_root(&trigger_table->mem_root, buff.ptr(),
buff.length());
new_def.length= buff.length();
on_table_name->str= new_def.str + before_on_len;
on_table_name->length= on_q_table_name_len;
*def= new_def;
}
thd->variables.sql_mode= save_sql_mode;
if (thd->is_fatal_error)
return TRUE; /* OOM */
if (save_trigger_file(this, new_db_name, new_table_name->str))
return TRUE;
if (rm_trigger_file(path_buff, old_db_name, old_table_name->str))
{
(void) rm_trigger_file(path_buff, new_db_name, new_table_name->str);
return TRUE;
}
return FALSE;
}
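
/*
  Worked example (illustrative): given a stored definition

    CREATE DEFINER=`root`@`localhost` TRIGGER t1_bi
      BEFORE INSERT ON `t1` FOR EACH ROW SET @a:= 1

  with on_table_name covering the "ON `t1` " fragment, renaming t1 to t2
  rebuilds the definition with "ON `t2` " spliced in, copies the result
  to trigger_table->mem_root, and repoints on_table_name into the copy.
*/
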
/**
  Iterate through the Table_triggers_list::names_list and update
  .TRN files after renaming triggers' subject table.
@param old_db_name Old database of subject table
@param new_db_name New database of subject table
@param new_table_name New subject table's name
@param stopper Pointer to Table_triggers_list::names_list at
which we should stop updating.
@retval
0 Success
@retval
non-0 Failure, pointer to Table_triggers_list::names_list element
for which update failed.
*/
LEX_STRING*
Table_triggers_list::change_table_name_in_trignames(const char *old_db_name,
const char *new_db_name,
LEX_STRING *new_table_name,
LEX_STRING *stopper)
{
char trigname_buff[FN_REFLEN];
struct st_trigname trigname;
LEX_STRING trigname_file;
LEX_STRING *trigger;
List_iterator_fast<LEX_STRING> it_name(names_list);
while ((trigger= it_name++) != stopper)
{
trigname_file.length= build_table_filename(trigname_buff, FN_REFLEN-1,
new_db_name, trigger->str,
TRN_EXT, 0);
trigname_file.str= trigname_buff;
trigname.trigger_table= *new_table_name;
if (sql_create_definition_file(NULL, &trigname_file, &trigname_file_type,
(uchar*)&trigname, trigname_file_parameters))
return trigger;
/* Remove stale .TRN file in case of database upgrade */
if (old_db_name)
{
if (rm_trigname_file(trigname_buff, old_db_name, trigger->str))
{
(void) rm_trigname_file(trigname_buff, new_db_name, trigger->str);
return trigger;
}
}
}
return 0;
}
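
/*
  Illustrative sketch, assuming the usual .TRN layout: each trigger has
  a <trigger_name>.TRN file in its schema directory that records the
  subject table, so after renaming t1 to t2 the rewritten db1/t1_bi.TRN
  would read:

    TYPE=TRIGGERNAME
    trigger_table=t2
*/
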
/**
Update .TRG and .TRN files after renaming triggers' subject table.
@param[in,out] thd Thread context
@param[in] db Old database of subject table
@param[in] old_table Old name of subject table
@param[in] new_db New database for subject table
@param[in] new_table New name of subject table
  @note
    This method tries to leave trigger-related files in a consistent
    state: it either completes successfully, or it fails leaving the
    files in their initial state.
    It also assumes that the subject table is not renamed to itself.

    This method needs to be called under an exclusive table metadata
    lock.
@retval FALSE Success
@retval TRUE Error
*/
bool Table_triggers_list::change_table_name(THD *thd, const char *db,
const char *old_table,
const char *new_db,
const char *new_table)
{
TABLE table;
bool result= 0;
bool upgrading50to51= FALSE;
LEX_STRING *err_trigname;
DBUG_ENTER("change_table_name");
bzero(&table, sizeof(table));
init_sql_alloc(&table.mem_root, 8192, 0);
  /*
    This method is called from server code that is protected either by
    the LOCK_open mutex or by an exclusive metadata lock.
    In the future, an exclusive metadata lock will be enough.
  */
#ifndef DBUG_OFF
if (thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, old_table,
MDL_EXCLUSIVE))
mysql_mutex_assert_owner(&LOCK_open);
#endif
DBUG_ASSERT(my_strcasecmp(table_alias_charset, db, new_db) ||
my_strcasecmp(table_alias_charset, old_table, new_table));
if (Table_triggers_list::check_n_load(thd, db, old_table, &table, TRUE))
{
result= 1;
goto end;
}
if (table.triggers)
{
LEX_STRING old_table_name= { (char *) old_table, strlen(old_table) };
LEX_STRING new_table_name= { (char *) new_table, strlen(new_table) };
    /*
      Since triggers should be in the same schema as their subject
      tables, moving a table together with its triggers between two
      schemas raises too many questions (e.g. what should happen if the
      new schema already has a trigger with the same name?).

      In case of "ALTER DATABASE `#mysql50#db1` UPGRADE DATA DIRECTORY NAME"
      we are given the table name with the "#mysql50#" prefix.
      To remove this prefix we use check_n_cut_mysql50_prefix().
    */
if (my_strcasecmp(table_alias_charset, db, new_db))
{
char dbname[NAME_LEN + 1];
if (check_n_cut_mysql50_prefix(db, dbname, sizeof(dbname)) &&
!my_strcasecmp(table_alias_charset, dbname, new_db))
{
upgrading50to51= TRUE;
}
else
{
my_error(ER_TRG_IN_WRONG_SCHEMA, MYF(0));
result= 1;
goto end;
}
}
if (table.triggers->change_table_name_in_triggers(thd, db, new_db,
&old_table_name,
&new_table_name))
{
result= 1;
goto end;
}
if ((err_trigname= table.triggers->change_table_name_in_trignames(
upgrading50to51 ? db : NULL,
new_db, &new_table_name, 0)))
{
      /*
        If we were unable to update one of the .TRN files properly, we
        revert all changes made so far and report an error.
        We assume that the changes can be undone without errors (there
        is not much we can do if an error occurs anyway).
      */
(void) table.triggers->change_table_name_in_trignames(
upgrading50to51 ? new_db : NULL, db,
&old_table_name, err_trigname);
(void) table.triggers->change_table_name_in_triggers(
thd, db, new_db,
&new_table_name, &old_table_name);
result= 1;
goto end;
}
}
end:
delete table.triggers;
free_root(&table.mem_root, MYF(0));
DBUG_RETURN(result);
}
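
/*
  Illustrative sketch of a hypothetical caller: RENAME TABLE and
  ALTER TABLE ... RENAME invoke this after the table files have been
  moved, and must undo the file rename if trigger files cannot be
  updated.

    if (Table_triggers_list::change_table_name(thd, old_db, old_name,
                                               new_db, new_name))
    {
      /* Move the .frm/data files back and report the error. */
    }
*/
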
/**
Execute trigger for given (event, time) pair.
  The operation executes the trigger for the specified event (insert,
  update, delete) and action time (before, after) if such a trigger is
  defined.

  @param thd                Thread context.
  @param event              Trigger event type.
  @param time_type          Trigger action time (before/after).
  @param old_row_is_record1 TRUE if the old version of the row is stored
                            in record1 (and the new version in the
                            table's regular record buffer), FALSE for
                            the opposite layout.
@return Error status.
@retval FALSE on success.
@retval TRUE on error.
*/
bool Table_triggers_list::process_triggers(THD *thd,
trg_event_type event,
trg_action_time_type time_type,
bool old_row_is_record1)
{
bool err_status;
Sub_statement_state statement_state;
sp_head *sp_trigger= bodies[event][time_type];
if (sp_trigger == NULL)
return FALSE;
if (old_row_is_record1)
{
old_field= record1_field;
new_field= trigger_table->field;
}
else
{
new_field= record1_field;
old_field= trigger_table->field;
}
/*
This trigger must have been processed by the pre-locking
algorithm.
*/
DBUG_ASSERT(trigger_table->pos_in_table_list->trg_event_map &
static_cast<uint>(1 << static_cast<int>(event)));
thd->reset_sub_statement_state(&statement_state, SUB_STMT_TRIGGER);
err_status=
sp_trigger->execute_trigger(thd,
&trigger_table->s->db,
&trigger_table->s->table_name,
&subject_table_grants[event][time_type]);
thd->restore_sub_statement_state(&statement_state);
return err_status;
}
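
/*
  Illustrative sketch of a hypothetical caller: the INSERT code path
  wraps the actual row write in BEFORE/AFTER trigger invocations.

    if (table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
                                          TRG_ACTION_BEFORE, TRUE))
      return 1;                     /* trigger raised an error */
    error= table->file->ha_write_row(table->record[0]);
    if (!error &&
        table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
                                          TRG_ACTION_AFTER, TRUE))
      return 1;
*/
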
/**
Add triggers for table to the set of routines used by statement.
Add tables used by them to statement table list. Do the same for
routines used by triggers.
@param thd Thread context.
@param prelocking_ctx Prelocking context of the statement.
@param table_list Table list element for table with trigger.
@retval FALSE Success.
@retval TRUE Failure.
*/
bool
Table_triggers_list::
add_tables_and_routines_for_triggers(THD *thd,
Query_tables_list *prelocking_ctx,
TABLE_LIST *table_list)
{
DBUG_ASSERT(static_cast<int>(table_list->lock_type) >=
static_cast<int>(TL_WRITE_ALLOW_WRITE));
for (int i= 0; i < (int)TRG_EVENT_MAX; i++)
{
if (table_list->trg_event_map &
static_cast<uint8>(1 << static_cast<int>(i)))
{
for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
{
/* We can have only one trigger per action type currently */
sp_head *trigger= table_list->table->triggers->bodies[i][j];
if (trigger)
{
MDL_key key(MDL_key::TRIGGER, trigger->m_db.str, trigger->m_name.str);
if (sp_add_used_routine(prelocking_ctx, thd->stmt_arena,
&key, table_list->belong_to_view))
{
trigger->add_used_tables_to_table_list(thd,
&prelocking_ctx->query_tables_last,
table_list->belong_to_view);
sp_update_stmt_used_routines(thd, prelocking_ctx,
&trigger->m_sroutines,
table_list->belong_to_view);
trigger->propagate_attributes(prelocking_ctx);
}
}
}
}
}
return FALSE;
}
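
/*
  Illustrative note, summarizing the code above: prelocking treats each
  trigger body like a stored routine. sp_add_used_routine() registers
  it under an MDL_key of type TRIGGER, and only when the trigger was
  not seen before are its own tables and routines merged into the
  statement's prelocking set, so the whole closure is locked in one
  pass.
*/
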
/**
Mark fields of subject table which we read/set in its triggers
as such.
  This method marks the fields of the subject table which are read/set
  in its triggers (by properly updating TABLE::read_set/write_set) and
  thus informs the handler that values for these fields should be
  retrieved/stored during execution of the statement.

  @param event  Type of event whose triggers we are going to inspect.
*/
void Table_triggers_list::mark_fields_used(trg_event_type event)
{
int action_time;
Item_trigger_field *trg_field;
for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++)
{
for (trg_field= trigger_fields[event][action_time]; trg_field;
trg_field= trg_field->next_trg_field)
{
      /* We cannot mark fields that are not present in the table. */
if (trg_field->field_idx != (uint)-1)
{
bitmap_set_bit(trigger_table->read_set, trg_field->field_idx);
if (trg_field->get_settable_routine_parameter())
bitmap_set_bit(trigger_table->write_set, trg_field->field_idx);
}
}
}
trigger_table->file->column_bitmaps_signal();
}
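
/*
  Illustrative example: for a trigger body containing
  "SET NEW.a:= NEW.b + 1", this marks column b in
  trigger_table->read_set and column a in both read_set and write_set,
  so the handler retrieves and stores exactly the columns the triggers
  touch.
*/
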
/**
Trigger BUG#14090 compatibility hook.
  @param[in,out] unknown_key  reference to the line with the unknown
                              parameter and the parsing point
  @param[in]     base         base address for parameter writing
                              (structure like TABLE)
  @param[in]     mem_root     MEM_ROOT for parameter allocation
  @param[in]     end          the end of the configuration

  @note
    This hook handles backward compatibility for the incorrectly
    written sql_modes parameter (see BUG#14090).
@retval
FALSE OK
@retval
TRUE Error
*/
#define INVALID_SQL_MODES_LENGTH 13
bool
Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key,
uchar* base,
MEM_ROOT *mem_root,
char *end)
{
DBUG_ENTER("Handle_old_incorrect_sql_modes_hook::process_unknown_string");
DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_SQL_MODES_LENGTH + 1 < end &&
unknown_key[INVALID_SQL_MODES_LENGTH] == '=' &&
!memcmp(unknown_key, STRING_WITH_LEN("sql_modes")))
{
char *ptr= unknown_key + INVALID_SQL_MODES_LENGTH + 1;
DBUG_PRINT("info", ("sql_modes affected by BUG#14090 detected"));
push_warning_printf(current_thd,
MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_OLD_FILE_FORMAT,
ER(ER_OLD_FILE_FORMAT),
(char *)path, "TRIGGER");
if (get_file_options_ulllist(ptr, end, unknown_key, base,
&sql_modes_parameters, mem_root))
{
DBUG_RETURN(TRUE);
}
    /*
      Set the parsing pointer to the last symbol of the string ('\n')
      1) to avoid problems with '\0' in the junk after sql_modes;
      2) to let the parser skip the rest of this line quickly.
    */
unknown_key= ptr-1;
}
DBUG_RETURN(FALSE);
}
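
/*
  Illustrative note (message text approximate): when an old .TRG file
  contains the malformed sql_modes entry produced by BUG#14090, the
  hook re-parses the value list in place and downgrades the problem to
  a note of the form

    Note ER_OLD_FILE_FORMAT: '<path>' has an old format, you should
    re-create the 'TRIGGER' object(s)

  instead of failing to load the trigger.
*/
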
#define INVALID_TRIGGER_TABLE_LENGTH 15
/**
Trigger BUG#15921 compatibility hook. For details see
Handle_old_incorrect_sql_modes_hook::process_unknown_string().
*/
bool
Handle_old_incorrect_trigger_table_hook::
process_unknown_string(char *&unknown_key, uchar* base, MEM_ROOT *mem_root,
char *end)
{
DBUG_ENTER("Handle_old_incorrect_trigger_table_hook::process_unknown_string");
DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1 < end &&
unknown_key[INVALID_TRIGGER_TABLE_LENGTH] == '=' &&
!memcmp(unknown_key, STRING_WITH_LEN("trigger_table")))
{
char *ptr= unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1;
DBUG_PRINT("info", ("trigger_table affected by BUG#15921 detected"));
push_warning_printf(current_thd,
MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_OLD_FILE_FORMAT,
ER(ER_OLD_FILE_FORMAT),
(char *)path, "TRIGGER");
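    /* Decode the escaped table name written by the old server version. */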
if (!(ptr= parse_escaped_string(ptr, end, mem_root, trigger_table_value)))
{
my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), "trigger_table",
unknown_key);
DBUG_RETURN(TRUE);
}
    /* Set the parsing pointer to the last symbol of the string (\n). */
unknown_key= ptr-1;
}
DBUG_RETURN(FALSE);
}
/**
  Construct path to TRN-file.
  @param[in]  thd       Thread context.
  @param[in]  trg_name  Trigger name.
  @param[out] trn_path  Variable to store the constructed path.
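  @note A minimal usage sketch (assuming a caller-owned FN_REFLEN buffer;
  "path_buff" is a name used here for illustration only):
  @code
    char path_buff[FN_REFLEN];
    LEX_STRING trn_path= { path_buff, 0 };
    build_trn_path(thd, trg_name, &trn_path);
    if (check_trn_exists(&trn_path))
      my_error(ER_TRG_DOES_NOT_EXIST, MYF(0));  // no TRN-file on disk
  @endcode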
*/
void build_trn_path(THD *thd, const sp_name *trg_name, LEX_STRING *trn_path)
{
/* Construct path to the TRN-file. */
trn_path->length= build_table_filename(trn_path->str,
FN_REFLEN - 1,
trg_name->m_db.str,
trg_name->m_name.str,
TRN_EXT,
0);
}
/**
Check if TRN-file exists.
@return
@retval TRUE if TRN-file does not exist.
@retval FALSE if TRN-file exists.
*/
bool check_trn_exists(const LEX_STRING *trn_path)
{
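  /* access() with F_OK only tests existence; non-zero means no file. */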
return access(trn_path->str, F_OK) != 0;
}
/**
Retrieve table name for given trigger.
  @param[in]  thd       Thread context.
  @param[in]  trg_name  Trigger name.
  @param[in]  trn_path  Path to the corresponding TRN-file.
  @param[out] tbl_name  Variable to store the retrieved table name.
@return Error status.
@retval FALSE on success.
@retval TRUE if table name could not be retrieved.
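  @note A minimal usage sketch (assuming trn_path was already filled in
  by build_trn_path(); tbl_name's string storage comes from thd->mem_root):
  @code
    LEX_STRING tbl_name;
    if (load_table_name_for_trigger(thd, trg_name, &trn_path, &tbl_name))
      DBUG_RETURN(TRUE);  // error has already been reported
  @endcode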
*/
bool load_table_name_for_trigger(THD *thd,
const sp_name *trg_name,
const LEX_STRING *trn_path,
LEX_STRING *tbl_name)
{
File_parser *parser;
struct st_trigname trn_data;
Handle_old_incorrect_trigger_table_hook trigger_table_hook(
trn_path->str,
&trn_data.trigger_table);
DBUG_ENTER("load_table_name_for_trigger");
/* Parse the TRN-file. */
if (!(parser= sql_parse_prepare(trn_path, thd->mem_root, TRUE)))
DBUG_RETURN(TRUE);
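  /* A valid TRN-file must declare the TRIGGERNAME file type. */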
if (!is_equal(&trigname_file_type, parser->type()))
{
my_error(ER_WRONG_OBJECT, MYF(0),
trg_name->m_name.str,
TRN_EXT + 1,
"TRIGGERNAME");
DBUG_RETURN(TRUE);
}
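  /*
    Parse the single trigger_table parameter; the hook transparently
    handles TRN-files written by old versions (see BUG#15921).
  */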
if (parser->parse((uchar*) &trn_data, thd->mem_root,
trigname_file_parameters, 1,
&trigger_table_hook))
DBUG_RETURN(TRUE);
/* Copy trigger table name. */
*tbl_name= trn_data.trigger_table;
/* That's all. */
DBUG_RETURN(FALSE);
}