mariadb/sql/event_data_objects.cc


/* Copyright (C) 2004-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define MYSQL_LEX 1
#include "mysql_priv.h"
#include "events.h"
#include "event_data_objects.h"
#include "event_db_repository.h"
#include "sp_head.h"
/**
@addtogroup Event_Scheduler
@{
*/
/*************************************************************************/
/**
Event_creation_ctx -- creation context of events.
*/
class Event_creation_ctx :public Stored_program_creation_ctx,
public Sql_alloc
{
public:
static bool load_from_db(THD *thd,
MEM_ROOT *event_mem_root,
const char *db_name,
const char *event_name,
TABLE *event_tbl,
Stored_program_creation_ctx **ctx);
public:
virtual Stored_program_creation_ctx *clone(MEM_ROOT *mem_root)
{
return new (mem_root)
Event_creation_ctx(m_client_cs, m_connection_cl, m_db_cl);
}
protected:
virtual Object_creation_ctx *create_backup_ctx(THD *thd) const
{
/*
We can avoid the usual backup/restore employed in stored programs since we
know that this is a top level statement and the worker thread is
allocated exclusively to execute this event.
*/
return NULL;
}
private:
Event_creation_ctx(CHARSET_INFO *client_cs,
CHARSET_INFO *connection_cl,
CHARSET_INFO *db_cl)
: Stored_program_creation_ctx(client_cs, connection_cl, db_cl)
{ }
};
/**************************************************************************
Event_creation_ctx implementation.
**************************************************************************/
bool
Event_creation_ctx::load_from_db(THD *thd,
MEM_ROOT *event_mem_root,
const char *db_name,
const char *event_name,
TABLE *event_tbl,
Stored_program_creation_ctx **ctx)
{
/* Load character set/collation attributes. */
CHARSET_INFO *client_cs;
CHARSET_INFO *connection_cl;
CHARSET_INFO *db_cl;
bool invalid_creation_ctx= FALSE;
if (load_charset(event_mem_root,
event_tbl->field[ET_FIELD_CHARACTER_SET_CLIENT],
thd->variables.character_set_client,
&client_cs))
{
sql_print_warning("Event '%s'.'%s': invalid value "
"in column mysql.event.character_set_client.",
(const char *) db_name,
(const char *) event_name);
invalid_creation_ctx= TRUE;
}
if (load_collation(event_mem_root,
event_tbl->field[ET_FIELD_COLLATION_CONNECTION],
thd->variables.collation_connection,
&connection_cl))
{
sql_print_warning("Event '%s'.'%s': invalid value "
"in column mysql.event.collation_connection.",
(const char *) db_name,
(const char *) event_name);
invalid_creation_ctx= TRUE;
}
if (load_collation(event_mem_root,
event_tbl->field[ET_FIELD_DB_COLLATION],
NULL,
&db_cl))
{
sql_print_warning("Event '%s'.'%s': invalid value "
"in column mysql.event.db_collation.",
(const char *) db_name,
(const char *) event_name);
invalid_creation_ctx= TRUE;
}
/*
If we failed to resolve the database collation, load the default one
from the disk.
*/
if (!db_cl)
db_cl= get_default_db_collation(thd, db_name);
/* Create the context. */
*ctx= new Event_creation_ctx(client_cs, connection_cl, db_cl);
return invalid_creation_ctx;
}
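/*
  A minimal sketch of the intended calling pattern, mirroring
  Event_timed::load_from_row() further below: a TRUE return only means
  that some stored attributes were invalid and defaults were substituted,
  so callers typically emit a warning and keep using the returned context.

    Stored_program_creation_ctx *creation_ctx;

    if (Event_creation_ctx::load_from_db(thd, &mem_root, dbname.str,
                                         name.str, table, &creation_ctx))
      push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                          ER_EVENT_INVALID_CREATION_CTX,
                          ER(ER_EVENT_INVALID_CREATION_CTX),
                          dbname.str, name.str);
*/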
/*************************************************************************/
/*
Initializes dbname and name of an Event_queue_element_for_exec
object
SYNOPSIS
Event_queue_element_for_exec::init()
RETURN VALUE
FALSE OK
TRUE Error (OOM)
*/
bool
Event_queue_element_for_exec::init(LEX_STRING db, LEX_STRING n)
{
if (!(dbname.str= my_strndup(db.str, dbname.length= db.length, MYF(MY_WME))))
return TRUE;
if (!(name.str= my_strndup(n.str, name.length= n.length, MYF(MY_WME))))
{
my_free((uchar*) dbname.str, MYF(0));
return TRUE;
}
return FALSE;
}
/*
Destructor
SYNOPSIS
Event_queue_element_for_exec::~Event_queue_element_for_exec()
*/
Event_queue_element_for_exec::~Event_queue_element_for_exec()
{
my_free((uchar*) dbname.str, MYF(0));
my_free((uchar*) name.str, MYF(0));
}
/*
Constructor
SYNOPSIS
Event_basic::Event_basic()
*/
Event_basic::Event_basic()
{
DBUG_ENTER("Event_basic::Event_basic");
/* init memory root */
init_alloc_root(&mem_root, 256, 512);
dbname.str= name.str= NULL;
dbname.length= name.length= 0;
time_zone= NULL;
DBUG_VOID_RETURN;
}
/*
Destructor
SYNOPSIS
Event_basic::~Event_basic()
*/
Event_basic::~Event_basic()
{
DBUG_ENTER("Event_basic::~Event_basic");
free_root(&mem_root, MYF(0));
DBUG_VOID_RETURN;
}
/*
Loads a list of char columns into LEX_STRINGs.
SYNOPSIS
Event_basic::load_string_fields()
fields The Field array
... Varargs: pairs of a field index (enum enum_events_table_field,
passed as int because the enum is unknown in event_data_objects.h)
and a LEX_STRING pointer to fill, terminated by ET_FIELD_COUNT
RETURN VALUE
FALSE OK
TRUE Error (a field could not be loaded)
*/
bool
Event_basic::load_string_fields(Field **fields, ...)
{
bool ret= FALSE;
va_list args;
enum enum_events_table_field field_name;
LEX_STRING *field_value;
DBUG_ENTER("Event_basic::load_string_fields");
va_start(args, fields);
field_name= (enum enum_events_table_field) va_arg(args, int);
while (field_name < ET_FIELD_COUNT)
{
field_value= va_arg(args, LEX_STRING *);
if ((field_value->str= get_field(&mem_root, fields[field_name])) == NullS)
{
ret= TRUE;
break;
}
field_value->length= strlen(field_value->str);
field_name= (enum enum_events_table_field) va_arg(args, int);
}
va_end(args);
DBUG_RETURN(ret);
}
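/*
  For illustration, a caller passes (field index, LEX_STRING *) pairs and
  terminates the list with ET_FIELD_COUNT, e.g. (abridged from
  Event_job_data::load_from_row() below):

    load_string_fields(table->field,
                       ET_FIELD_DB, &dbname,
                       ET_FIELD_NAME, &name,
                       ET_FIELD_COUNT);
*/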
bool
Event_basic::load_time_zone(THD *thd, const LEX_STRING tz_name)
{
String str(tz_name.str, &my_charset_latin1);
time_zone= my_tz_find(thd, &str);
return (time_zone == NULL);
}
/*
Constructor
SYNOPSIS
Event_queue_element::Event_queue_element()
*/
Event_queue_element::Event_queue_element():
status_changed(FALSE), last_executed_changed(FALSE),
on_completion(Event_parse_data::ON_COMPLETION_DROP),
status(Event_parse_data::ENABLED), expression(0), dropped(FALSE),
execution_count(0)
{
DBUG_ENTER("Event_queue_element::Event_queue_element");
starts= ends= execute_at= last_executed= 0;
starts_null= ends_null= execute_at_null= TRUE;
DBUG_VOID_RETURN;
}
/*
Destructor
SYNOPSIS
Event_queue_element::~Event_queue_element()
*/
Event_queue_element::~Event_queue_element()
{
}
/*
Constructor
SYNOPSIS
Event_timed::Event_timed()
*/
Event_timed::Event_timed():
created(0), modified(0), sql_mode(0)
{
DBUG_ENTER("Event_timed::Event_timed");
init();
DBUG_VOID_RETURN;
}
/*
Destructor
SYNOPSIS
Event_timed::~Event_timed()
*/
Event_timed::~Event_timed()
{
}
/*
Constructor
SYNOPSIS
Event_job_data::Event_job_data()
*/
Event_job_data::Event_job_data()
:sql_mode(0)
{
}
/*
Init all member variables
SYNOPSIS
Event_timed::init()
*/
void
Event_timed::init()
{
DBUG_ENTER("Event_timed::init");
definer_user.str= definer_host.str= body.str= comment.str= NULL;
definer_user.length= definer_host.length= body.length= comment.length= 0;
sql_mode= 0;
DBUG_VOID_RETURN;
}
/**
Load an event's body from a row from mysql.event.
@details This method is intentionally silent on errors: callers are
responsible for reporting them, since this class should not know how to
communicate with the client.
@return Operation status
@retval FALSE OK
@retval TRUE Error
*/
bool
Event_job_data::load_from_row(THD *thd, TABLE *table)
{
char *ptr;
uint len;
LEX_STRING tz_name;
DBUG_ENTER("Event_job_data::load_from_row");
if (!table)
DBUG_RETURN(TRUE);
if (table->s->fields < ET_FIELD_COUNT)
DBUG_RETURN(TRUE);
if (load_string_fields(table->field,
ET_FIELD_DB, &dbname,
ET_FIELD_NAME, &name,
ET_FIELD_BODY, &body,
ET_FIELD_DEFINER, &definer,
ET_FIELD_TIME_ZONE, &tz_name,
ET_FIELD_COUNT))
DBUG_RETURN(TRUE);
if (load_time_zone(thd, tz_name))
DBUG_RETURN(TRUE);
Event_creation_ctx::load_from_db(thd, &mem_root, dbname.str, name.str, table,
&creation_ctx);
ptr= strchr(definer.str, '@');
if (! ptr)
ptr= definer.str;
len= ptr - definer.str;
definer_user.str= strmake_root(&mem_root, definer.str, len);
definer_user.length= len;
len= definer.length - len - 1;
/* Subtract 1 for the '@' separator. */
definer_host.str= strmake_root(&mem_root, ptr + 1, len);
definer_host.length= len;
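/*
  For illustration, with an assumed definer value: "root@localhost"
  (definer.length == 14) is split at the '@' into definer_user "root"
  (length 4) and definer_host "localhost" (length 14 - 4 - 1 == 9).
*/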
sql_mode= (ulong) table->field[ET_FIELD_SQL_MODE]->val_int();
DBUG_RETURN(FALSE);
}
/**
Load an event's scheduling attributes from a row from mysql.event.
@details This method is intentionally silent on errors: callers are
responsible for reporting them, since this class should not know how to
communicate with the client.
@return Operation status
@retval FALSE OK
@retval TRUE Error
*/
bool
Event_queue_element::load_from_row(THD *thd, TABLE *table)
{
char *ptr;
MYSQL_TIME time;
LEX_STRING tz_name;
DBUG_ENTER("Event_queue_element::load_from_row");
if (!table)
DBUG_RETURN(TRUE);
if (table->s->fields < ET_FIELD_COUNT)
DBUG_RETURN(TRUE);
if (load_string_fields(table->field,
ET_FIELD_DB, &dbname,
ET_FIELD_NAME, &name,
ET_FIELD_DEFINER, &definer,
ET_FIELD_TIME_ZONE, &tz_name,
ET_FIELD_COUNT))
DBUG_RETURN(TRUE);
if (load_time_zone(thd, tz_name))
DBUG_RETURN(TRUE);
starts_null= table->field[ET_FIELD_STARTS]->is_null();
my_bool not_used= FALSE;
if (!starts_null)
{
table->field[ET_FIELD_STARTS]->get_date(&time, TIME_NO_ZERO_DATE);
starts= my_tz_OFFSET0->TIME_to_gmt_sec(&time,&not_used);
}
ends_null= table->field[ET_FIELD_ENDS]->is_null();
if (!ends_null)
{
table->field[ET_FIELD_ENDS]->get_date(&time, TIME_NO_ZERO_DATE);
ends= my_tz_OFFSET0->TIME_to_gmt_sec(&time,&not_used);
}
if (!table->field[ET_FIELD_INTERVAL_EXPR]->is_null())
expression= table->field[ET_FIELD_INTERVAL_EXPR]->val_int();
else
expression= 0;
/*
If neither STARTS nor ENDS is set and there is no interval expression,
the event must be a one-time event, so ET_FIELD_EXECUTE_AT has to be set;
an empty EXECUTE_AT in that case indicates a corrupted row.
*/
execute_at_null= table->field[ET_FIELD_EXECUTE_AT]->is_null();
DBUG_ASSERT(!(starts_null && ends_null && !expression && execute_at_null));
if (!expression && !execute_at_null)
{
if (table->field[ET_FIELD_EXECUTE_AT]->get_date(&time,
TIME_NO_ZERO_DATE))
DBUG_RETURN(TRUE);
execute_at= my_tz_OFFSET0->TIME_to_gmt_sec(&time,&not_used);
}
/*
We load the interval type from disk as a string and then map it to
an integer. This decouples the values of enum interval_type from the
values actually stored on disk, so the enum can be reordered without
risking data incompatibilities between versions.
*/
if (!table->field[ET_FIELD_TRANSIENT_INTERVAL]->is_null())
{
int i;
char buff[MAX_FIELD_WIDTH];
String str(buff, sizeof(buff), &my_charset_bin);
LEX_STRING tmp;
table->field[ET_FIELD_TRANSIENT_INTERVAL]->val_str(&str);
if (!(tmp.length= str.length()))
DBUG_RETURN(TRUE);
tmp.str= str.c_ptr_safe();
i= find_string_in_array(interval_type_to_name, &tmp, system_charset_info);
if (i < 0)
DBUG_RETURN(TRUE);
interval= (interval_type) i;
}
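/*
  For illustration (assuming an event defined with an EVERY ... MINUTE
  schedule): the string "MINUTE" read from ET_FIELD_TRANSIENT_INTERVAL
  is looked up in interval_type_to_name above and mapped to
  INTERVAL_MINUTE, whatever numeric value that enum member happens
  to have.
*/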
if (!table->field[ET_FIELD_LAST_EXECUTED]->is_null())
{
table->field[ET_FIELD_LAST_EXECUTED]->get_date(&time,
TIME_NO_ZERO_DATE);
last_executed= my_tz_OFFSET0->TIME_to_gmt_sec(&time,&not_used);
}
last_executed_changed= FALSE;
if ((ptr= get_field(&mem_root, table->field[ET_FIELD_STATUS])) == NullS)
DBUG_RETURN(TRUE);
DBUG_PRINT("load_from_row", ("Event [%s] is [%s]", name.str, ptr));
/* Set event status (ENABLED | SLAVESIDE_DISABLED | DISABLED) */
switch (ptr[0])
{
case 'E' :
status = Event_parse_data::ENABLED;
break;
case 'S' :
status = Event_parse_data::SLAVESIDE_DISABLED;
break;
case 'D' :
default:
status = Event_parse_data::DISABLED;
break;
}
if ((ptr= get_field(&mem_root, table->field[ET_FIELD_ORIGINATOR])) == NullS)
DBUG_RETURN(TRUE);
originator = table->field[ET_FIELD_ORIGINATOR]->val_int();
/* TODO (Andrey): Find a way not to allocate ptr on event_mem_root */
if ((ptr= get_field(&mem_root,
table->field[ET_FIELD_ON_COMPLETION])) == NullS)
DBUG_RETURN(TRUE);
on_completion= (ptr[0]=='D'? Event_parse_data::ON_COMPLETION_DROP:
Event_parse_data::ON_COMPLETION_PRESERVE);
DBUG_RETURN(FALSE);
}
/**
Load an event's body from a row from mysql.event.
@details This method is intentionally silent on errors: callers are
responsible for reporting them, since this class should not know how to
communicate with the client.
@return Operation status
@retval FALSE OK
@retval TRUE Error
*/
bool
Event_timed::load_from_row(THD *thd, TABLE *table)
{
char *ptr;
uint len;
DBUG_ENTER("Event_timed::load_from_row");
if (Event_queue_element::load_from_row(thd, table))
DBUG_RETURN(TRUE);
if (load_string_fields(table->field,
ET_FIELD_BODY, &body,
ET_FIELD_BODY_UTF8, &body_utf8,
ET_FIELD_COUNT))
DBUG_RETURN(TRUE);
if (Event_creation_ctx::load_from_db(thd, &mem_root, dbname.str, name.str,
table, &creation_ctx))
{
push_warning_printf(thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
ER_EVENT_INVALID_CREATION_CTX,
ER(ER_EVENT_INVALID_CREATION_CTX),
(const char *) dbname.str,
(const char *) name.str);
}
ptr= strchr(definer.str, '@');
if (! ptr)
ptr= definer.str;
len= ptr - definer.str;
definer_user.str= strmake_root(&mem_root, definer.str, len);
definer_user.length= len;
len= definer.length - len - 1;
/* Subtract 1 for the '@' separator. */
definer_host.str= strmake_root(&mem_root, ptr + 1, len);
definer_host.length= len;
created= table->field[ET_FIELD_CREATED]->val_int();
modified= table->field[ET_FIELD_MODIFIED]->val_int();
comment.str= get_field(&mem_root, table->field[ET_FIELD_COMMENT]);
if (comment.str != NullS)
comment.length= strlen(comment.str);
else
comment.length= 0;
sql_mode= (ulong) table->field[ET_FIELD_SQL_MODE]->val_int();
DBUG_RETURN(FALSE);
}
/*
add_interval() adds a specified interval to time 'ltime' in time
zone 'time_zone', and returns the result converted to the number of
seconds since epoch (aka Unix time; in UTC time zone). Zero result
means an error.
*/
static
my_time_t
add_interval(MYSQL_TIME *ltime, const Time_zone *time_zone,
interval_type scale, INTERVAL interval)
{
if (date_add_interval(ltime, scale, interval))
return 0;
my_bool not_used;
return time_zone->TIME_to_gmt_sec(ltime, &not_used);
}
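/*
  Illustrative use, mirroring get_next_time() below: a zero return is
  treated as an error and must be checked before the result is used.

    my_time_t next_time= add_interval(&local_start, time_zone,
                                      INTERVAL_SECOND, interval);
    if (next_time == 0)
      goto done;                              // error
*/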
/*
Computes the next execution time: the start timestamp plus as many
intervals as needed to obtain a time in the future.
SYNOPSIS
get_next_time()
time_zone event time zone
next the resulting time (out parameter)
start add the interval(s) to this time
time_now current time
i_value number of interval units to add
i_type type of interval to add (SECOND, MINUTE, HOUR, WEEK ...)
RETURN VALUE
0 OK
1 Error
NOTES
1) If the interval is convertible to SECOND (MINUTE, HOUR, DAY, WEEK),
we use TIMEDIFF()'s implementation as the underlying mechanism and
seconds as the resolution of the computation.
2) In all other cases (MONTH, QUARTER, YEAR) we use MONTH as the
resolution and PERIOD_DIFF()'s implementation.
*/
static
bool get_next_time(const Time_zone *time_zone, my_time_t *next,
my_time_t start, my_time_t time_now,
int i_value, interval_type i_type)
{
DBUG_ENTER("get_next_time");
DBUG_PRINT("enter", ("start: %lu now: %lu", (long) start, (long) time_now));
DBUG_ASSERT(start <= time_now);
longlong months=0, seconds=0;
switch (i_type) {
case INTERVAL_YEAR:
months= i_value*12;
break;
case INTERVAL_QUARTER:
/* Has already been converted to months */
case INTERVAL_YEAR_MONTH:
case INTERVAL_MONTH:
months= i_value;
break;
case INTERVAL_WEEK:
/* WEEK has already been converted to days */
case INTERVAL_DAY:
seconds= i_value*24*3600;
break;
case INTERVAL_DAY_HOUR:
case INTERVAL_HOUR:
seconds= i_value*3600;
break;
case INTERVAL_DAY_MINUTE:
case INTERVAL_HOUR_MINUTE:
case INTERVAL_MINUTE:
seconds= i_value*60;
break;
case INTERVAL_DAY_SECOND:
case INTERVAL_HOUR_SECOND:
case INTERVAL_MINUTE_SECOND:
case INTERVAL_SECOND:
seconds= i_value;
break;
case INTERVAL_DAY_MICROSECOND:
case INTERVAL_HOUR_MICROSECOND:
case INTERVAL_MINUTE_MICROSECOND:
case INTERVAL_SECOND_MICROSECOND:
case INTERVAL_MICROSECOND:
/*
Return an error here so that SHOW EVENTS / SELECT FROM I_S.EVENTS
will report an error as well.
*/
DBUG_RETURN(1);
break;
case INTERVAL_LAST:
DBUG_ASSERT(0);
}
DBUG_PRINT("info", ("seconds: %ld months: %ld", (long) seconds, (long) months));
MYSQL_TIME local_start;
MYSQL_TIME local_now;
/* Convert times from UTC to local. */
{
time_zone->gmt_sec_to_TIME(&local_start, start);
time_zone->gmt_sec_to_TIME(&local_now, time_now);
}
INTERVAL interval;
bzero(&interval, sizeof(interval));
my_time_t next_time= 0;
if (seconds)
{
longlong seconds_diff;
long microsec_diff;
bool negative= calc_time_diff(&local_now, &local_start, 1,
&seconds_diff, &microsec_diff);
if (!negative)
{
/*
The formula below returns the interval that, when added to
local_start, will always give the time in the future.
*/
interval.second= seconds_diff - seconds_diff % seconds + seconds;
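/*
  For illustration, with assumed values: if seconds_diff == 130 and the
  event interval is seconds == 60, the assignment above gives
  interval.second == 130 - 130 % 60 + 60 == 180, so local_start + 180
  seconds is 50 seconds after local_now, i.e. strictly in the future.
*/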
next_time= add_interval(&local_start, time_zone,
INTERVAL_SECOND, interval);
if (next_time == 0)
goto done;
}
if (next_time <= time_now)
{
/*
If 'negative' is true above, then 'next_time == 0', and
'next_time <= time_now' is also true. If negative is false,
then next_time was set, but perhaps to a value that is less
than time_now. See below for elaboration.
*/
DBUG_ASSERT(negative || next_time > 0);
/*
If local_now < local_start, i.e. STARTS time is in the future
according to the local time (it is always in the past according
to UTC---this is a prerequisite of this function), then
STARTS is almost always in the past according to the local
time too. However, in the time zone that has backward
Daylight Saving Time shift, the following may happen: suppose
we have a backward DST shift at certain date after 2:59:59,
i.e. local time goes 1:59:59, 2:00:00, ... , 2:59:59, (shift
here) 2:00:00 (again), ... , 2:59:59 (again), 3:00:00, ... .
Now suppose the time has passed the first 2:59:59, has been
shifted backward, and now is (the second) 2:20:00. The user
does CREATE EVENT with STARTS 'current-date 2:40:00'. Local
time 2:40:00 from create statement is treated by time
functions as the first such time, so according to UTC it comes
before the second 2:20:00. But according to local time it is
obviously in the future, so we end up in this branch.
Since we are in the second pass through 2:00:00--2:59:59, and
any local time from this interval is treated by system
functions as the time from the first pass, we have to find the
time for the next execution that is past the DST-affected
interval (past the second 2:59:59 for our example,
i.e. starting from 3:00:00). We do this in the loop until the
local time is mapped onto future UTC time. 'start' time is in
the past, so we may use 'do { } while' here, and add the first
interval right away.
Alternatively, it could be that local_now >= local_start. Now
for the example above imagine we do CREATE EVENT with STARTS
'current-date 2:10:00'. Local start 2:10 is in the past (now
is local 2:20), so we add an interval, and get next execution
time, say, 2:40. It is in the future according to local time,
but, again, since we are in the second pass through
2:00:00--2:59:59, 2:40 will be converted into UTC time in the
past. So we will end up in this branch again, and may add
intervals in a 'do { } while' loop.
Note that for any given event we may end up here only if the event's
next execution time maps to the time interval that is
passed twice, and only if the server was started during the
second pass, or the event is being created during the second
pass. After that, we never will get here (unless we again
start the server during the second pass). In other words,
such a condition is extremely rare.
*/
interval.second= seconds;
do
{
next_time= add_interval(&local_start, time_zone,
INTERVAL_SECOND, interval);
if (next_time == 0)
goto done;
}
while (next_time <= time_now);
}
}
else
{
long diff_months= (long) (local_now.year - local_start.year)*12 +
(local_now.month - local_start.month);
/*
Unlike for seconds above, the formula below returns the interval
that, when added to the local_start, will give the time in the
past, or somewhere in the current month. We are interested in
the latter case, to see if this time has already passed, or is
yet to come this month.
Note that the time is guaranteed to be in the past unless
(diff_months % months == 0), but no good optimization is
possible here, because (diff_months % months == 0) is what will
happen most of the time, as get_next_time() will be called right
after the execution of the event. We could pass last_executed
time to this function, and see if the execution has already
happened this month, but for that we will have to convert
last_executed from seconds since epoch to local broken-down
time, and this will greatly reduce the effect of the
optimization. So instead we keep the code simple and clean.
*/
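/*
  For illustration, with assumed values: local_start 2007-01-15,
  local_now 2007-05-20 and EVERY 3 MONTH (months == 3) give
  diff_months == 4, so interval.month below becomes 4 - 4 % 3 == 3.
  The candidate time 2007-04-15 is in the past, so the branch further
  below adds one more full 3-month interval, yielding 2007-07-15.
*/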
interval.month= (ulong) (diff_months - diff_months % months);
next_time= add_interval(&local_start, time_zone,
INTERVAL_MONTH, interval);
if (next_time == 0)
goto done;
if (next_time <= time_now)
{
interval.month= (ulong) months;
next_time= add_interval(&local_start, time_zone,
INTERVAL_MONTH, interval);
if (next_time == 0)
goto done;
}
}
DBUG_ASSERT(time_now < next_time);
*next= next_time;
done:
DBUG_PRINT("info", ("next_time: %ld", (long) next_time));
DBUG_RETURN(next_time == 0);
}
/*
Computes next execution time.
SYNOPSIS
Event_queue_element::compute_next_execution_time()
RETURN VALUE
FALSE OK
TRUE Error
NOTES
The computed time is stored in execute_at; if there are no more
executions, execute_at is set to 0.
*/
bool
Event_queue_element::compute_next_execution_time()
{
my_time_t time_now;
DBUG_ENTER("Event_queue_element::compute_next_execution_time");
DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx",
(long) starts, (long) ends, (long) last_executed,
(long) this));
if (status != Event_parse_data::ENABLED)
{
DBUG_PRINT("compute_next_execution_time",
("Event %s is DISABLED", name.str));
goto ret;
}
/* If one-time, no need to do computation */
if (!expression)
{
/* Let's check whether it was executed */
if (last_executed)
{
DBUG_PRINT("info",("One-time event %s.%s of was already executed",
dbname.str, name.str));
dropped= (on_completion == Event_parse_data::ON_COMPLETION_DROP);
DBUG_PRINT("info",("One-time event will be dropped: %d.", dropped));
status= Event_parse_data::DISABLED;
status_changed= TRUE;
}
goto ret;
}
time_now= (my_time_t) current_thd->query_start();
DBUG_PRINT("info",("NOW: [%lu]", (ulong) time_now));
/* if time_now is after ends don't execute anymore */
if (!ends_null && ends < time_now)
{
DBUG_PRINT("info", ("NOW after ENDS, don't execute anymore"));
/* time_now is after ends. don't execute anymore */
execute_at= 0;
execute_at_null= TRUE;
if (on_completion == Event_parse_data::ON_COMPLETION_DROP)
dropped= TRUE;
DBUG_PRINT("info", ("Dropped: %d", dropped));
status= Event_parse_data::DISABLED;
status_changed= TRUE;
goto ret;
}
/*
Here time_now is before or equal to ends, if the latter is set.
Let's check whether time_now is before starts;
if so, schedule for starts.
*/
if (!starts_null && time_now <= starts)
{
if (time_now == starts && starts == last_executed)
{
/*
Do nothing, or we would schedule a second execution at starts.
*/
}
else
{
DBUG_PRINT("info", ("STARTS is future, NOW <= STARTS,sched for STARTS"));
/*
starts is in the future
time_now before starts. Scheduling for starts
*/
execute_at= starts;
execute_at_null= FALSE;
goto ret;
}
}
if (!starts_null && !ends_null)
{
/*
Both starts and m_ends are set, and time_now is between them (inclusive).
If last_executed is set, advance it by m_expression; if the resulting time
is after m_ends, set execute_at to 0 and check on_completion.
If last_executed is not set, schedule the event for now.
*/
DBUG_PRINT("info", ("Both STARTS & ENDS are set"));
if (!last_executed)
{
DBUG_PRINT("info", ("Not executed so far."));
}
{
my_time_t next_exec;
if (get_next_time(time_zone, &next_exec, starts, time_now,
(int) expression, interval))
goto err;
/* There was previous execution */
if (ends < next_exec)
{
DBUG_PRINT("info", ("Next execution of %s after ENDS. Stop executing.",
name.str));
/* Next execution after ends. No more executions */
execute_at= 0;
execute_at_null= TRUE;
if (on_completion == Event_parse_data::ON_COMPLETION_DROP)
dropped= TRUE;
status= Event_parse_data::DISABLED;
status_changed= TRUE;
}
else
{
DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec));
execute_at= next_exec;
execute_at_null= FALSE;
}
}
goto ret;
}
else if (starts_null && ends_null)
{
/* starts is always set, so this is a dead branch !! */
DBUG_PRINT("info", ("Neither STARTS nor ENDS are set"));
/*
Neither starts nor m_ends is set, so we schedule the next execution
based on last_executed.
*/
if (last_executed)
{
my_time_t next_exec;
if (get_next_time(time_zone, &next_exec, starts, time_now,
(int) expression, interval))
goto err;
execute_at= next_exec;
DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec));
}
else
{
/* last_executed not set. Schedule the event for now */
DBUG_PRINT("info", ("Execute NOW"));
execute_at= time_now;
}
execute_at_null= FALSE;
}
else
{
/* either starts or m_ends is set */
if (!starts_null)
{
DBUG_PRINT("info", ("STARTS is set"));
/*
- starts is set.
- starts is not in the future, according to the check made before.
Hence schedule for starts + m_expression if last_executed is not set,
otherwise for last_executed + m_expression.
*/
if (!last_executed)
{
DBUG_PRINT("info", ("Not executed so far."));
}
{
my_time_t next_exec;
if (get_next_time(time_zone, &next_exec, starts, time_now,
(int) expression, interval))
goto err;
execute_at= next_exec;
DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec));
}
execute_at_null= FALSE;
}
else
{
/* this is a dead branch, because starts is always set !!! */
DBUG_PRINT("info", ("STARTS is not set. ENDS is set"));
/*
- m_ends is set
- m_ends is equal to or after time_now.
Hence take last_executed and increment it by m_expression;
if last_executed is not set, schedule the event for now.
*/
if (!last_executed)
execute_at= time_now;
else
{
my_time_t next_exec;
if (get_next_time(time_zone, &next_exec, starts, time_now,
(int) expression, interval))
goto err;
if (ends < next_exec)
{
DBUG_PRINT("info", ("Next execution after ENDS. Stop executing."));
execute_at= 0;
execute_at_null= TRUE;
status= Event_parse_data::DISABLED;
status_changed= TRUE;
if (on_completion == Event_parse_data::ON_COMPLETION_DROP)
dropped= TRUE;
}
else
{
DBUG_PRINT("info", ("Next[%lu]", (ulong) next_exec));
execute_at= next_exec;
execute_at_null= FALSE;
}
}
}
goto ret;
}
ret:
DBUG_PRINT("info", ("ret: 0 execute_at: %lu", (long) execute_at));
DBUG_RETURN(FALSE);
err:
DBUG_PRINT("info", ("ret=1"));
DBUG_RETURN(TRUE);
}
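/*
  A worked example for compute_next_execution_time() above, under assumed
  values and a time zone without DST shifts in the range: an event with
  STARTS '2007-01-01 00:00:00' EVERY 1 HOUR, evaluated at
  2007-02-10 10:30:00, gets execute_at set to 2007-02-10 11:00:00,
  i.e. the next whole interval boundary counted from STARTS that lies
  in the future.
*/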
/*
Set the internal last_executed timestamp to NOW, which is the time
according to thd->query_start(), i.e. the THD's clock.
SYNOPSIS
Event_queue_element::mark_last_executed()
thd thread context
*/
void
Event_queue_element::mark_last_executed(THD *thd)
{
last_executed= (my_time_t) thd->query_start();
last_executed_changed= TRUE;
execution_count++;
}
/*
Saves status and last_executed_at to the disk if changed.
SYNOPSIS
Event_queue_element::update_timing_fields()
thd - thread context
RETURN VALUE
FALSE OK
TRUE Error while opening mysql.event for writing or during
write on disk
*/
bool
Event_queue_element::update_timing_fields(THD *thd)
{
Event_db_repository *db_repository= Events::get_db_repository();
int ret;
DBUG_ENTER("Event_queue_element::update_timing_fields");
DBUG_PRINT("enter", ("name: %*s", (int) name.length, name.str));
/* No need to update if nothing has changed */
if (!(status_changed || last_executed_changed))
DBUG_RETURN(0);
ret= db_repository->update_timing_fields_for_event(thd,
dbname, name,
last_executed_changed,
last_executed,
status_changed,
(ulonglong) status);
last_executed_changed= status_changed= FALSE;
DBUG_RETURN(ret);
}
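/*
Appends a schedule clause of the form <space><name> '<datetime>',
for example " STARTS '2007-05-16 00:00:00'", converting the my_time_t
value into the event's time zone via gmt_sec_to_TIME(). (The sample
clause is illustrative; the exact text depends on the event.)
*/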
static
void
append_datetime(String *buf, Time_zone *time_zone, my_time_t secs,
const char *name, uint len)
{
char dtime_buff[20*2+32];/* +32 to make my_snprintf_{8bit|ucs2} happy */
buf->append(STRING_WITH_LEN(" "));
buf->append(name, len);
buf->append(STRING_WITH_LEN(" '"));
/*
my_datetime_to_str() below fills the buffer and returns the number
of characters to copy.
*/
MYSQL_TIME time;
time_zone->gmt_sec_to_TIME(&time, secs);
buf->append(dtime_buff, my_datetime_to_str(&time, dtime_buff));
buf->append(STRING_WITH_LEN("'"));
}
/*
Get SHOW CREATE EVENT as string
SYNOPSIS
Event_timed::get_create_event(THD *thd, String *buf)
thd Thread
buf String*, should be already allocated. CREATE EVENT goes inside.
RETURN VALUE
0 OK
EVEX_MICROSECOND_UNSUP Error (currently only if mysql.event has been
tampered with and a MICROSECOND interval
or a derivative of it has been put there)
*/
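/*
For illustration only (the exact text depends on the event definition
and on how append_identifier() quotes names), the statement produced
below looks roughly like:
CREATE EVENT `ev1` ON SCHEDULE EVERY 1 HOUR
STARTS '2007-05-16 00:00:00' ON COMPLETION NOT PRESERVE ENABLE
DO INSERT INTO t1 VALUES (1)
Note that this function emits no DEFINER clause.
*/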
int
Event_timed::get_create_event(THD *thd, String *buf)
{
char tmp_buf[2 * STRING_BUFFER_USUAL_SIZE];
String expr_buf(tmp_buf, sizeof(tmp_buf), system_charset_info);
expr_buf.length(0);
DBUG_ENTER("get_create_event");
DBUG_PRINT("ret_info",("body_len=[%d]body=[%s]",
(int) body.length, body.str));
if (expression && Events::reconstruct_interval_expression(&expr_buf, interval,
expression))
DBUG_RETURN(EVEX_MICROSECOND_UNSUP);
buf->append(STRING_WITH_LEN("CREATE EVENT "));
append_identifier(thd, buf, name.str, name.length);
if (expression)
{
buf->append(STRING_WITH_LEN(" ON SCHEDULE EVERY "));
buf->append(expr_buf);
buf->append(' ');
LEX_STRING *ival= &interval_type_to_name[interval];
buf->append(ival->str, ival->length);
if (!starts_null)
append_datetime(buf, time_zone, starts, STRING_WITH_LEN("STARTS"));
if (!ends_null)
append_datetime(buf, time_zone, ends, STRING_WITH_LEN("ENDS"));
}
else
{
append_datetime(buf, time_zone, execute_at,
STRING_WITH_LEN("ON SCHEDULE AT"));
}
if (on_completion == Event_parse_data::ON_COMPLETION_DROP)
buf->append(STRING_WITH_LEN(" ON COMPLETION NOT PRESERVE "));
else
buf->append(STRING_WITH_LEN(" ON COMPLETION PRESERVE "));
if (status == Event_parse_data::ENABLED)
buf->append(STRING_WITH_LEN("ENABLE"));
else if (status == Event_parse_data::SLAVESIDE_DISABLED)
buf->append(STRING_WITH_LEN("DISABLE ON SLAVE"));
else
buf->append(STRING_WITH_LEN("DISABLE"));
if (comment.length)
{
buf->append(STRING_WITH_LEN(" COMMENT "));
append_unescaped(buf, comment.str, comment.length);
}
buf->append(STRING_WITH_LEN(" DO "));
buf->append(body.str, body.length);
DBUG_RETURN(0);
}
/**
Get an artificial stored procedure to parse as an event definition.
*/
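/*
As an illustration, a hypothetical event `ev1` with the body
"INSERT INTO t1 VALUES (1)" would be wrapped roughly as:
CREATE PROCEDURE `ev1`() SQL SECURITY INVOKER INSERT INTO t1 VALUES (1)
This text is only used internally to parse the event body.
*/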
bool
Event_job_data::construct_sp_sql(THD *thd, String *sp_sql)
{
LEX_STRING buffer;
const uint STATIC_SQL_LENGTH= 44;
DBUG_ENTER("Event_job_data::construct_sp_sql");
/*
Allocate a large enough buffer on the thread execution memory
root to avoid multiple [re]allocations on system heap
*/
buffer.length= STATIC_SQL_LENGTH + name.length + body.length;
if (! (buffer.str= (char*) thd->alloc(buffer.length)))
DBUG_RETURN(TRUE);
sp_sql->set(buffer.str, buffer.length, system_charset_info);
sp_sql->length(0);
sp_sql->append(C_STRING_WITH_LEN("CREATE "));
sp_sql->append(C_STRING_WITH_LEN("PROCEDURE "));
/*
Use the event name as the procedure name, so that any parse error
message refers to the event. We use append_identifier() here so that
events whose names are reserved words still parse successfully.
*/
append_identifier(thd, sp_sql, name.str, name.length);
/*
The default SQL security of a stored procedure is DEFINER. We
have already activated the security context of the event, so
let's execute the procedure with the invoker rights to save on
resets of security contexts.
*/
sp_sql->append(C_STRING_WITH_LEN("() SQL SECURITY INVOKER "));
sp_sql->append(body.str, body.length);
DBUG_RETURN(thd->is_fatal_error);
}
/**
Get DROP EVENT statement to binlog the drop of ON COMPLETION NOT
PRESERVE event.
*/
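/*
For a hypothetical event db1.ev1 the constructed statement would be
roughly (identifier quoting depends on append_identifier()):
DROP EVENT `db1`.`ev1`
It is written to the binary log so that ON COMPLETION NOT PRESERVE
events are also dropped on the slave.
*/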
bool
Event_job_data::construct_drop_event_sql(THD *thd, String *sp_sql)
{
LEX_STRING buffer;
const uint STATIC_SQL_LENGTH= 14;
DBUG_ENTER("Event_job_data::construct_drop_event_sql");
buffer.length= STATIC_SQL_LENGTH + name.length*2 + dbname.length*2;
if (! (buffer.str= (char*) thd->alloc(buffer.length)))
DBUG_RETURN(TRUE);
sp_sql->set(buffer.str, buffer.length, system_charset_info);
sp_sql->length(0);
sp_sql->append(C_STRING_WITH_LEN("DROP EVENT "));
append_identifier(thd, sp_sql, dbname.str, dbname.length);
sp_sql->append('.');
append_identifier(thd, sp_sql, name.str, name.length);
DBUG_RETURN(thd->is_fatal_error);
}
/**
Compiles and executes the event (the underlying sp_head object)
@retval TRUE error (reported to the error log)
@retval FALSE success
*/
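/*
Execution outline (summary of the code below): set the current
database, switch to the definer's security context, verify the EVENT
privilege, wrap the body into an artificial CREATE PROCEDURE (see
construct_sp_sql), parse and run it, and, for ON COMPLETION NOT
PRESERVE events, drop the event under a temporarily raised SUPER
privilege so that the DROP EVENT statement reaches the binary log.
*/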
bool
Event_job_data::execute(THD *thd, bool drop)
{
String sp_sql;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context event_sctx, *save_sctx= NULL;
#endif
List<Item> empty_item_list;
bool ret= TRUE;
DBUG_ENTER("Event_job_data::execute");
mysql_reset_thd_for_next_command(thd);
/*
MySQL parser currently assumes that current database is either
present in THD or all names in all statements are fully specified.
Yet names that are not fully specified inside stored programs must
be supported, even if the current database is not set:
CREATE PROCEDURE db1.p1() BEGIN CREATE TABLE t1; END//
-- in this example t1 should always be created in db1, and the statement
must parse even if there is no current database.
To support this feature and still address the parser limitation,
we need to set the current database here.
We don't have to call mysql_change_db, since the checks performed
in it are unnecessary for the purpose of parsing, and
mysql_change_db will be invoked anyway later, to activate the
procedure database before it's executed.
*/
thd->set_db(dbname.str, dbname.length);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (event_sctx.change_security_context(thd,
&definer_user, &definer_host,
&dbname, &save_sctx))
{
sql_print_error("Event Scheduler: "
"[%s].[%s.%s] execution failed, "
"failed to authenticate the user.",
definer.str, dbname.str, name.str);
goto end_no_lex_start;
}
#endif
if (check_access(thd, EVENT_ACL, dbname.str,
0, 0, 0, is_schema_db(dbname.str)))
{
/*
This aspect of behavior is defined in the worklog,
and this is how triggers work too: if TRIGGER
privilege is revoked from trigger definer,
triggers are not executed.
*/
sql_print_error("Event Scheduler: "
"[%s].[%s.%s] execution failed, "
"user no longer has EVENT privilege.",
definer.str, dbname.str, name.str);
goto end_no_lex_start;
}
if (construct_sp_sql(thd, &sp_sql))
goto end_no_lex_start;
/*
Set up global thread attributes to reflect the properties of
this Event. We can simply reset these instead of usual
backup/restore employed in stored programs since we know that
this is a top level statement and the worker thread is
allocated exclusively to execute this event.
*/
thd->variables.sql_mode= sql_mode;
thd->variables.time_zone= time_zone;
/*
Peculiar initialization order is a crutch to avoid races in SHOW
PROCESSLIST which reads thd->{query/query_length} without a mutex.
*/
thd->query_length= 0;
thd->query= sp_sql.c_ptr_safe();
thd->query_length= sp_sql.length();
{
Lex_input_stream lip(thd, thd->query, thd->query_length);
lex_start(thd);
if (parse_sql(thd, &lip, creation_ctx))
{
sql_print_error("Event Scheduler: "
"%serror during compilation of %s.%s",
thd->is_fatal_error ? "fatal " : "",
(const char *) dbname.str, (const char *) name.str);
goto end;
}
}
{
sp_head *sphead= thd->lex->sphead;
DBUG_ASSERT(sphead);
if (thd->enable_slow_log)
sphead->m_flags|= sp_head::LOG_SLOW_STATEMENTS;
sphead->m_flags|= sp_head::LOG_GENERAL_LOG;
sphead->set_info(0, 0, &thd->lex->sp_chistics, sql_mode);
sphead->set_creation_ctx(creation_ctx);
sphead->optimize();
ret= sphead->execute_procedure(thd, &empty_item_list);
/*
There is no pre-locking and therefore there should be no
tables open and locked left after execute_procedure.
*/
}
end:
if (thd->lex->sphead) /* NULL only if a parse error */
{
delete thd->lex->sphead;
thd->lex->sphead= NULL;
}
end_no_lex_start:
if (drop && !thd->is_fatal_error)
{
/*
We must do it here, since at this point we are still running under
the authentication ID of the event definer.
*/
sql_print_information("Event Scheduler: Dropping %s.%s",
(const char *) dbname.str, (const char *) name.str);
/*
Construct a query for the binary log, to ensure the event is dropped
on the slave
*/
if (construct_drop_event_sql(thd, &sp_sql))
ret= 1;
else
{
ulong saved_master_access;
/*
Peculiar initialization order is a crutch to avoid races in SHOW
PROCESSLIST which reads thd->{query/query_length} without a mutex.
*/
thd->query_length= 0;
thd->query= sp_sql.c_ptr_safe();
thd->query_length= sp_sql.length();
/*
NOTE: even if we run in read-only mode, we should be able to lock
the mysql.event table for writing. In order to achieve this, we
should call mysql_lock_tables() under the super-user.
*/
saved_master_access= thd->security_ctx->master_access;
thd->security_ctx->master_access |= SUPER_ACL;
ret= Events::drop_event(thd, dbname, name, FALSE);
thd->security_ctx->master_access= saved_master_access;
}
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (save_sctx)
event_sctx.restore_security_context(thd, save_sctx);
#endif
lex_end(thd->lex);
thd->lex->unit.cleanup();
thd->end_statement();
thd->cleanup_after_query();
/* Avoid races with SHOW PROCESSLIST */
thd->query_length= 0;
thd->query= NULL;
DBUG_PRINT("info", ("EXECUTED %s.%s ret: %d", dbname.str, name.str, ret));
DBUG_RETURN(ret);
}
/*
Checks whether two events are in the same schema
SYNOPSIS
event_basic_db_equal()
db Schema
et Compare et->dbname to `db`
RETURN VALUE
TRUE Equal
FALSE Not equal
*/
bool
event_basic_db_equal(LEX_STRING db, Event_basic *et)
{
return !sortcmp_lex_string(et->dbname, db, system_charset_info);
}
/*
Checks whether an event has equal `db` and `name`
SYNOPSIS
event_basic_identifier_equal()
db Schema
name Name
et The event object
RETURN VALUE
TRUE Equal
FALSE Not equal
*/
bool
event_basic_identifier_equal(LEX_STRING db, LEX_STRING name, Event_basic *b)
{
return !sortcmp_lex_string(name, b->name, system_charset_info) &&
!sortcmp_lex_string(db, b->dbname, system_charset_info);
}
/**
@} (End of group Event_Scheduler)
*/