mariadb/sql/sql_class.h


/* Copyright (C) 2000-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* Classes in mysql */
#ifdef USE_PRAGMA_INTERFACE
#pragma interface /* gcc class implementation */
#endif
// TODO: create log.h and move all the log header stuff there
class Query_log_event;
class Load_log_event;
class Slave_log_event;
class Format_description_log_event;
class sp_rcontext;
class sp_cache;
class Parser_state;
enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
enum enum_log_type { LOG_CLOSED, LOG_TO_BE_OPENED, LOG_NORMAL, LOG_NEW, LOG_BIN};
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
DELAY_KEY_WRITE_ALL };
enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN,
CHECK_FIELD_ERROR_FOR_NULL };
extern char internal_table_name[2];
extern char empty_c_string[1];
extern const char **errmesg;
extern bool volatile shutdown_in_progress;
#define TC_LOG_PAGE_SIZE 8192
#define TC_LOG_MIN_SIZE (3*TC_LOG_PAGE_SIZE)
#define TC_HEURISTIC_RECOVER_COMMIT 1
#define TC_HEURISTIC_RECOVER_ROLLBACK 2
extern uint tc_heuristic_recover;
/*
Transaction Coordinator log - a base abstract class
for two different implementations
*/
class TC_LOG
{
public:
int using_heuristic_recover();
TC_LOG() {}
virtual ~TC_LOG() {}
virtual int open(const char *opt_name)=0;
virtual void close()=0;
virtual int log_xid(THD *thd, my_xid xid)=0;
virtual void unlog(ulong cookie, my_xid xid)=0;
};
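/*
  Illustrative sketch, not actual server code: how a two-phase-commit path
  might drive the interface above, assuming (as TC_LOG_DUMMY's return value
  of 1 suggests) that log_xid() returns a non-zero cookie on success and 0
  on failure. The function name is hypothetical.

    static int example_commit_with_tclog(THD *thd, my_xid xid)
    {
      int cookie= tc_log->log_xid(thd, xid);     // make the xid durable
      if (!cookie)
        return 1;                                // logging failed
      // ... commit the transaction in all storage engines here ...
      tc_log->unlog(cookie, xid);                // forget the xid afterwards
      return 0;
    }
*/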
class TC_LOG_DUMMY: public TC_LOG // use it to disable the logging
{
public:
TC_LOG_DUMMY() {} /* Remove gcc warning */
int open(const char *opt_name) { return 0; }
void close() { }
int log_xid(THD *thd, my_xid xid) { return 1; }
void unlog(ulong cookie, my_xid xid) { }
};
#ifdef HAVE_MMAP
class TC_LOG_MMAP: public TC_LOG
{
public: // only to keep Sun Forte on sol9x86 happy
typedef enum {
POOL, // page is in pool
ERROR, // last sync failed
DIRTY // new xids added since last sync
} PAGE_STATE;
private:
typedef struct st_page {
struct st_page *next; // pages are linked in a fifo queue
my_xid *start, *end; // usable area of a page
my_xid *ptr; // next xid will be written here
int size, free; // max and current number of free xid slots on the page
int waiters; // number of waiters on condition
PAGE_STATE state; // see above
pthread_mutex_t lock; // to access page data or control structure
pthread_cond_t cond; // to wait for a sync
} PAGE;
char logname[FN_REFLEN];
File fd;
my_off_t file_length;
uint npages, inited;
uchar *data;
struct st_page *pages, *syncing, *active, *pool, *pool_last;
/*
Note that LOCK_active is only used to protect the 'active' pointer;
to protect the contents of the active page one has to use active->lock.
The same applies to LOCK_pool and LOCK_sync.
(A usage sketch follows this class.)
*/
pthread_mutex_t LOCK_active, LOCK_pool, LOCK_sync;
pthread_cond_t COND_pool, COND_active;
public:
TC_LOG_MMAP(): inited(0) {}
int open(const char *opt_name);
void close();
int log_xid(THD *thd, my_xid xid);
void unlog(ulong cookie, my_xid xid);
int recover();
private:
void get_active_from_pool();
int sync();
int overflow();
};
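/*
  Illustrative sketch of the locking rule described inside the class above
  (one possible pattern only; the real log_xid() is more involved):

    pthread_mutex_lock(&LOCK_active);
    PAGE *p= active;                  // LOCK_active protects only this pointer
    pthread_mutex_lock(&p->lock);     // page contents are protected by p->lock
    pthread_mutex_unlock(&LOCK_active);
    // ... store an xid in the page via p->ptr, p->free, etc. ...
    pthread_mutex_unlock(&p->lock);
*/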
#else
#define TC_LOG_MMAP TC_LOG_DUMMY
#endif
extern TC_LOG *tc_log;
extern TC_LOG_MMAP tc_log_mmap;
extern TC_LOG_DUMMY tc_log_dummy;
/* log info errors */
#define LOG_INFO_EOF -1
#define LOG_INFO_IO -2
#define LOG_INFO_INVALID -3
#define LOG_INFO_SEEK -4
#define LOG_INFO_MEM -6
#define LOG_INFO_FATAL -7
#define LOG_INFO_IN_USE -8
/* bitmap to SQL_LOG::close() */
#define LOG_CLOSE_INDEX 1
#define LOG_CLOSE_TO_BE_OPENED 2
#define LOG_CLOSE_STOP_EVENT 4
struct st_relay_log_info;
typedef struct st_log_info
{
char log_file_name[FN_REFLEN];
my_off_t index_file_offset, index_file_start_offset;
my_off_t pos;
bool fatal; // if the purge happens to give us a negative offset
pthread_mutex_t lock;
st_log_info()
: index_file_offset(0), index_file_start_offset(0),
pos(0), fatal(0)
{
log_file_name[0] = '\0';
pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);
}
~st_log_info() { pthread_mutex_destroy(&lock);}
} LOG_INFO;
typedef struct st_user_var_events
{
user_var_entry *user_var_event;
char *value;
ulong length;
Item_result type;
uint charset_number;
} BINLOG_USER_VAR_EVENT;
#define RP_LOCK_LOG_IS_ALREADY_LOCKED 1
#define RP_FORCE_ROTATE 2
class Log_event;
/*
TODO split MYSQL_LOG into base MYSQL_LOG and
MYSQL_QUERY_LOG, MYSQL_SLOW_LOG, MYSQL_BIN_LOG
most of the code from MYSQL_LOG should be in the MYSQL_BIN_LOG
only (TC_LOG included)
TODO use mmap instead of IO_CACHE for binlog
(mmap+fsync is two times faster than write+fsync)
*/
class MYSQL_LOG: public TC_LOG
{
private:
/* LOCK_log and LOCK_index are inited by init_pthread_objects() */
pthread_mutex_t LOCK_log, LOCK_index;
pthread_mutex_t LOCK_prep_xids;
pthread_cond_t COND_prep_xids;
pthread_cond_t update_cond;
ulonglong bytes_written;
time_t last_time,query_start;
IO_CACHE log_file;
IO_CACHE index_file;
/*
purge_temp is a temp file used in purge_logs so that the index file
can be updated before deleting files from disk, yielding better crash
recovery. It is created on demand the first time purge_logs is called
and then reused for subsequent calls. It is cleaned up in cleanup().
*/
IO_CACHE purge_temp;
char *name;
char time_buff[20],db[NAME_LEN+1];
char log_file_name[FN_REFLEN],index_file_name[FN_REFLEN];
/*
The max size before rotation (usable only if log_type == LOG_BIN: binary
logs and relay logs).
For a binlog, max_size should be max_binlog_size.
For a relay log, it should be max_relay_log_size if this is non-zero,
max_binlog_size otherwise.
max_size is set in init(), and dynamically changed (when one does SET
GLOBAL MAX_BINLOG_SIZE|MAX_RELAY_LOG_SIZE) by fix_max_binlog_size and
fix_max_relay_log_size).
*/
ulong max_size;
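/*
  Illustrative sketch of the selection rule described above ('is_relay_log'
  is a hypothetical flag, not a member of this class):

    max_size= is_relay_log
              ? (max_relay_log_size ? max_relay_log_size : max_binlog_size)
              : max_binlog_size;
*/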
ulong prepared_xids; /* for tc log - number of xids to remember */
volatile enum_log_type log_type;
enum cache_type io_cache_type;
// current file sequence number for load data infile binary logging
uint file_id;
uint open_count; // For replication
int readers_count;
bool write_error, inited;
bool need_start_event;
/*
no_auto_events means we don't want any of the automatic events
Start/Rotate/Stop; e.g. in 4.x, when we rotate a relay log we don't
want a Rotate_log event written to it, and likewise when we start a
relay log. So in 4.x this is 1 for relay logs and 0 for binlogs.
In 5.0 it's 0 for relay logs too!
*/
bool no_auto_events;
friend class Log_event;
public:
/*
These describe the log's format. This is used only for relay logs.
_for_exec is used by the SQL thread, _for_queue by the I/O thread. It's
necessary to have 2 distinct objects, because the I/O thread may be reading
events in a different format from what the SQL thread is reading (consider
the case of a master which has been upgraded from 5.0 to 5.1 without doing
RESET MASTER, or from 4.x to 5.0).
*/
Format_description_log_event *description_event_for_exec,
*description_event_for_queue;
MYSQL_LOG();
/*
Note that there is no destructor ~MYSQL_LOG()!
The reason is that we don't want it to be called automatically
on exit(), but only during the proper shutdown process.
*/
int open(const char *opt_name);
void close();
int log_xid(THD *thd, my_xid xid);
2005-01-16 13:16:23 +01:00
void unlog(ulong cookie, my_xid xid);
int recover(IO_CACHE *log, Format_description_log_event *fdle);
void reset_bytes_written()
{
bytes_written = 0;
}
void harvest_bytes_written(ulonglong* counter)
{
#ifndef DBUG_OFF
char buf1[22],buf2[22];
#endif
DBUG_ENTER("harvest_bytes_written");
(*counter)+=bytes_written;
DBUG_PRINT("info",("counter: %s bytes_written: %s", llstr(*counter,buf1),
llstr(bytes_written,buf2)));
bytes_written=0;
DBUG_VOID_RETURN;
}
void set_max_size(ulong max_size_arg);
void signal_update();
void wait_for_update(THD* thd, bool master_or_slave);
void set_need_start_event() { need_start_event = 1; }
void init(enum_log_type log_type_arg,
enum cache_type io_cache_type_arg,
bool no_auto_events_arg, ulong max_size);
void init_pthread_objects();
void cleanup();
bool open(const char *log_name,
enum_log_type log_type,
const char *new_name,
enum cache_type io_cache_type_arg,
bool no_auto_events_arg, ulong max_size,
bool null_created);
const char *generate_name(const char *log_name, const char *suffix,
bool strip_ext, char *buff);
/* simplified open_xxx wrappers for the gigantic open above */
bool open_query_log(const char *log_name)
{
char buf[FN_REFLEN];
return open(generate_name(log_name, ".log", 0, buf),
LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0);
}
bool open_slow_log(const char *log_name)
{
char buf[FN_REFLEN];
return open(generate_name(log_name, "-slow.log", 0, buf),
LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0);
}
bool open_index_file(const char *index_file_name_arg,
const char *log_name);
void new_file(bool need_lock);
bool write(THD *thd, enum enum_server_command command,
const char *format, ...) ATTRIBUTE_FORMAT(printf, 4, 5);
bool write(THD *thd, const char *query, uint query_length,
time_t query_start=0);
bool write(Log_event* event_info); // binary log write
bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event);
void start_union_events(THD *thd, query_id_t query_id_param);
void stop_union_events(THD *thd);
bool is_query_in_union(THD *thd, query_id_t query_id_param);
/*
v stands for vector
invoked as appendv(buf1,len1,buf2,len2,...,bufn,lenn,0)
*/
bool appendv(const char* buf,uint len,...);
bool append(Log_event* ev);
int generate_new_name(char *new_name,const char *old_name);
void make_log_name(char* buf, const char* log_ident);
bool is_active(const char* log_file_name);
int update_log_index(LOG_INFO* linfo, bool need_update_threads);
void rotate_and_purge(uint flags);
bool flush_and_sync();
int purge_logs(const char *to_log, bool included,
bool need_mutex, bool need_update_threads,
ulonglong *decrease_log_space);
int purge_logs_before_date(time_t purge_time);
int purge_first_log(struct st_relay_log_info* rli, bool included);
bool reset_logs(THD* thd);
void close(uint exiting);
// iterating through the log index file
int find_log_pos(LOG_INFO* linfo, const char* log_name,
bool need_mutex);
int find_next_log(LOG_INFO* linfo, bool need_mutex);
int get_current_log(LOG_INFO* linfo);
int raw_get_current_log(LOG_INFO* linfo);
uint next_file_id();
inline bool is_open() { return log_type != LOG_CLOSED; }
inline char* get_index_fname() { return index_file_name;}
inline char* get_log_fname() { return log_file_name; }
inline char* get_name() { return name; }
inline pthread_mutex_t* get_log_lock() { return &LOCK_log; }
inline IO_CACHE* get_log_file() { return &log_file; }
inline void lock_index() { pthread_mutex_lock(&LOCK_index);}
inline void unlock_index() { pthread_mutex_unlock(&LOCK_index);}
inline IO_CACHE *get_index_file() { return &index_file;}
inline uint32 get_open_count() { return open_count; }
};
/*
The COPY_INFO structure is used by INSERT/REPLACE code.
How rows are counted by the INSERT / INSERT ... ON DUPLICATE KEY UPDATE
code:
If a row is inserted, the copied variable is incremented.
If a row is updated by INSERT ... ON DUPLICATE KEY UPDATE and the
new data differs from the old, both the copied and the updated
variables are incremented.
The touched variable is incremented whenever a row is touched by the
update part of INSERT ... ON DUPLICATE KEY UPDATE, no matter whether
the row was actually changed or not.
(A worked example follows the struct below.)
*/
typedef struct st_copy_info {
ha_rows records; /* Number of processed records */
ha_rows deleted; /* Number of deleted records */
ha_rows updated; /* Number of updated records */
ha_rows copied; /* Number of copied records */
ha_rows error_count;
ha_rows touched; /* Number of touched records */
enum enum_duplicates handle_duplicates;
int escape_char, last_errno;
bool ignore;
/* for INSERT ... UPDATE */
List<Item> *update_fields;
List<Item> *update_values;
/* for VIEW ... WITH CHECK OPTION */
TABLE_LIST *view;
} COPY_INFO;
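/*
  Worked example of the counting rules above (illustrative only). For an
  INSERT ... ON DUPLICATE KEY UPDATE over three rows where row A is new,
  row B exists and its new data differs, and row C exists with identical
  data:
    copied  += 2   (row A inserted, row B updated with changed data)
    updated += 1   (row B)
    touched += 2   (rows B and C were touched by the update part)
*/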
class key_part_spec :public Sql_alloc {
public:
const char *field_name;
uint length;
key_part_spec(const char *name,uint len=0) :field_name(name), length(len) {}
bool operator==(const key_part_spec& other) const;
};
class Alter_drop :public Sql_alloc {
public:
enum drop_type {KEY, COLUMN };
const char *name;
enum drop_type type;
Alter_drop(enum drop_type par_type,const char *par_name)
:name(par_name), type(par_type) {}
};
class Alter_column :public Sql_alloc {
public:
const char *name;
Item *def;
Alter_column(const char *par_name,Item *literal)
:name(par_name), def(literal) {}
};
class Key :public Sql_alloc {
public:
enum Keytype { PRIMARY, UNIQUE, MULTIPLE, FULLTEXT, SPATIAL, FOREIGN_KEY};
enum Keytype type;
enum ha_key_alg algorithm;
List<key_part_spec> columns;
const char *name;
bool generated;
Key(enum Keytype type_par, const char *name_arg, enum ha_key_alg alg_par,
bool generated_arg, List<key_part_spec> &cols)
:type(type_par), algorithm(alg_par), columns(cols), name(name_arg),
generated(generated_arg)
{}
~Key() {}
/* Equality comparison of keys (ignoring name) */
friend bool foreign_key_prefix(Key *a, Key *b);
};
class Table_ident;
class foreign_key: public Key {
public:
enum fk_match_opt { FK_MATCH_UNDEF, FK_MATCH_FULL,
FK_MATCH_PARTIAL, FK_MATCH_SIMPLE};
enum fk_option { FK_OPTION_UNDEF, FK_OPTION_RESTRICT, FK_OPTION_CASCADE,
FK_OPTION_SET_NULL, FK_OPTION_NO_ACTION, FK_OPTION_DEFAULT};
Table_ident *ref_table;
List<key_part_spec> ref_columns;
uint delete_opt, update_opt, match_opt;
foreign_key(const char *name_arg, List<key_part_spec> &cols,
Table_ident *table, List<key_part_spec> &ref_cols,
uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg)
:Key(FOREIGN_KEY, name_arg, HA_KEY_ALG_UNDEF, 0, cols),
ref_table(table), ref_columns(ref_cols),
delete_opt(delete_opt_arg), update_opt(update_opt_arg),
match_opt(match_opt_arg)
{}
};
typedef struct st_mysql_lock
{
TABLE **table;
uint table_count,lock_count;
THR_LOCK_DATA **locks;
} MYSQL_LOCK;
class LEX_COLUMN : public Sql_alloc
{
public:
String column;
uint rights;
LEX_COLUMN (const String& x,const uint& y ): column (x),rights (y) {}
};
#include "sql_lex.h" /* Must be here */
/* Needed to be able to have an I_List of char* strings in mysqld.cc. */
class i_string: public ilink
{
public:
char* ptr;
i_string():ptr(0) { }
i_string(char* s) : ptr(s) {}
};
/* needed for linked list of two strings for replicate-rewrite-db */
class i_string_pair: public ilink
{
public:
char* key;
char* val;
i_string_pair():key(0),val(0) { }
i_string_pair(char* key_arg, char* val_arg) : key(key_arg),val(val_arg) {}
};
class Delayed_insert;
class select_result;
#define THD_SENTRY_MAGIC 0xfeedd1ff
#define THD_SENTRY_GONE 0xdeadbeef
#define THD_CHECK_SENTRY(thd) DBUG_ASSERT(thd->dbug_sentry == THD_SENTRY_MAGIC)
struct system_variables
{
ulonglong myisam_max_extra_sort_file_size;
ulonglong myisam_max_sort_file_size;
ulonglong max_heap_table_size;
ulonglong tmp_table_size;
ha_rows select_limit;
ha_rows max_join_size;
ulong auto_increment_increment, auto_increment_offset;
ulong bulk_insert_buff_size;
ulong join_buff_size;
ulong long_query_time;
ulong max_allowed_packet;
ulong max_error_count;
ulong max_length_for_sort_data;
ulong max_sort_length;
ulong max_tmp_tables;
ulong max_insert_delayed_threads;
ulong multi_range_count;
ulong myisam_repair_threads;
ulong myisam_sort_buff_size;
ulong myisam_stats_method;
ulong net_buffer_length;
ulong net_interactive_timeout;
ulong net_read_timeout;
ulong net_retry_count;
ulong net_wait_timeout;
ulong net_write_timeout;
ulong optimizer_prune_level;
ulong optimizer_search_depth;
ulong preload_buff_size;
ulong profiling_history_size;
ulong query_cache_type;
ulong read_buff_size;
ulong read_rnd_buff_size;
ulong div_precincrement;
ulong sortbuff_size;
ulong table_type;
ulong tx_isolation;
ulong completion_type;
/* Determines which non-standard SQL behaviour should be enabled */
ulong sql_mode;
ulong max_sp_recursion_depth;
/* check of key presence in updatable view */
ulong updatable_views_with_limit;
ulong default_week_format;
ulong max_seeks_for_key;
ulong range_alloc_block_size;
ulong query_alloc_block_size;
ulong query_prealloc_size;
ulong trans_alloc_block_size;
ulong trans_prealloc_size;
ulong log_warnings;
ulong group_concat_max_len;
/*
In a slave thread we need to know on behalf of which thread the
query is being run, to replicate temporary tables properly
*/
ulong pseudo_thread_id;
my_bool low_priority_updates;
my_bool new_mode;
my_bool query_cache_wlock_invalidate;
my_bool engine_condition_pushdown;
my_bool keep_files_on_create;
#ifdef HAVE_INNOBASE_DB
my_bool innodb_table_locks;
my_bool innodb_support_xa;
#endif /* HAVE_INNOBASE_DB */
#ifdef HAVE_NDBCLUSTER_DB
ulong ndb_autoincrement_prefetch_sz;
my_bool ndb_force_send;
my_bool ndb_use_exact_count;
my_bool ndb_use_transactions;
#endif /* HAVE_NDBCLUSTER_DB */
my_bool old_passwords;
/* Only charset part of these variables is sensible */
CHARSET_INFO *character_set_filesystem;
CHARSET_INFO *character_set_client;
CHARSET_INFO *character_set_results;
/* Both charset and collation parts of these variables are important */
CHARSET_INFO *collation_server;
CHARSET_INFO *collation_database;
CHARSET_INFO *collation_connection;
/* Locale Support */
MY_LOCALE *lc_time_names;
Time_zone *time_zone;
/* DATE, DATETIME and MYSQL_TIME formats */
DATE_TIME_FORMAT *date_format;
DATE_TIME_FORMAT *datetime_format;
DATE_TIME_FORMAT *time_format;
my_bool sysdate_is_now;
};
/* per thread status variables */
typedef struct system_status_var
{
ulonglong bytes_received;
ulonglong bytes_sent;
ulong com_other;
ulong com_stat[(uint) SQLCOM_END];
ulong created_tmp_disk_tables;
ulong created_tmp_tables;
ulong ha_commit_count;
ulong ha_delete_count;
ulong ha_read_first_count;
ulong ha_read_last_count;
ulong ha_read_key_count;
ulong ha_read_next_count;
ulong ha_read_prev_count;
ulong ha_read_rnd_count;
ulong ha_read_rnd_next_count;
ulong ha_rollback_count;
ulong ha_update_count;
ulong ha_write_count;
ulong ha_prepare_count;
ulong ha_discover_count;
ulong ha_savepoint_count;
ulong ha_savepoint_rollback_count;
/* KEY_CACHE parts. These are copies of the original */
ulong key_blocks_changed;
ulong key_blocks_used;
ulong key_cache_r_requests;
ulong key_cache_read;
ulong key_cache_w_requests;
ulong key_cache_write;
/* END OF KEY_CACHE parts */
ulong net_big_packet_count;
ulong opened_tables;
ulong select_full_join_count;
ulong select_full_range_join_count;
ulong select_range_count;
ulong select_range_check_count;
ulong select_scan_count;
ulong long_query_count;
ulong filesort_merge_passes;
ulong filesort_range_count;
ulong filesort_rows;
ulong filesort_scan_count;
/* Prepared statements and binary protocol */
ulong com_stmt_prepare;
ulong com_stmt_execute;
ulong com_stmt_send_long_data;
ulong com_stmt_fetch;
ulong com_stmt_reset;
ulong com_stmt_close;
/*
Number of statements sent from the client
*/
ulong questions;
/*
IMPORTANT!
SEE last_system_status_var DEFINITION BELOW.
Below 'last_system_status_var' are all variables which don't make any
sense to add to the /global/ status variable counter.
*/
double last_query_cost;
} STATUS_VAR;
/*
This is used for 'SHOW STATUS'. It must be updated to the last ulong
variable in system_status_var which makes sense to add to the global
counter.
*/
#define last_system_status_var questions
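/*
  Illustrative sketch (an assumption, not the actual server function) of how
  the marker above can be used: walk the leading ulong members of STATUS_VAR
  up to and including last_system_status_var and add them to a global copy.
  Requires offsetof() from <stddef.h>.

    static void example_add_status(STATUS_VAR *to, STATUS_VAR *from)
    {
      ulong *to_p=   (ulong*) ((char*) to   + offsetof(STATUS_VAR, com_other));
      ulong *from_p= (ulong*) ((char*) from + offsetof(STATUS_VAR, com_other));
      ulong *end=    (ulong*) ((char*) to   +
                               offsetof(STATUS_VAR, last_system_status_var) +
                               sizeof(ulong));
      while (to_p != end)
        *(to_p++)+= *(from_p++);
    }
*/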
void free_tmp_table(THD *thd, TABLE *entry);
/* The following macro is to make init of Query_arena simpler */
#ifndef DBUG_OFF
#define INIT_ARENA_DBUG_INFO is_backup_arena= 0
#else
#define INIT_ARENA_DBUG_INFO
#endif
class Query_arena
{
public:
/*
List of items created in the parser for this query. Every item adds
itself to the list on creation (see Item::Item() for details).
*/
Item *free_list;
MEM_ROOT *mem_root; // Pointer to current memroot
#ifndef DBUG_OFF
bool is_backup_arena; /* True if this arena is used for backup. */
#endif
/*
The state reflects three different life cycles for three
different types of statements:
Prepared statement: INITIALIZED -> PREPARED -> EXECUTED.
Stored procedure: INITIALIZED_FOR_SP -> EXECUTED.
Other statements: CONVENTIONAL_EXECUTION never changes.
*/
enum enum_state
{
INITIALIZED= 0, INITIALIZED_FOR_SP= 1, PREPARED= 2,
CONVENTIONAL_EXECUTION= 3, EXECUTED= 4, ERROR= -1
};
enum_state state;
/* We build without RTTI, so dynamic_cast can't be used. */
enum Type
{
STATEMENT, PREPARED_STATEMENT, STORED_PROCEDURE
};
Query_arena(MEM_ROOT *mem_root_arg, enum enum_state state_arg) :
free_list(0), mem_root(mem_root_arg), state(state_arg)
{ INIT_ARENA_DBUG_INFO; }
/*
This constructor is used only when Query_arena is created as
backup storage for another instance of Query_arena.
*/
Query_arena() { INIT_ARENA_DBUG_INFO; }
virtual Type type() const;
virtual ~Query_arena() {};
inline bool is_stmt_prepare() const { return state == INITIALIZED; }
inline bool is_first_sp_execute() const
{ return state == INITIALIZED_FOR_SP; }
inline bool is_stmt_prepare_or_first_sp_execute() const
{ return (int)state < (int)PREPARED; }
inline bool is_stmt_prepare_or_first_stmt_execute() const
{ return (int)state <= (int)PREPARED; }
inline bool is_first_stmt_execute() const { return state == PREPARED; }
inline bool is_stmt_execute() const
{ return state == PREPARED || state == EXECUTED; }
inline bool is_conventional() const
{ return state == CONVENTIONAL_EXECUTION; }
inline gptr alloc(unsigned int size) { return alloc_root(mem_root,size); }
inline gptr calloc(unsigned int size)
{
gptr ptr;
if ((ptr=alloc_root(mem_root,size)))
bzero((char*) ptr,size);
return ptr;
}
inline char *strdup(const char *str)
{ return strdup_root(mem_root,str); }
inline char *strmake(const char *str, uint size)
{ return strmake_root(mem_root,str,size); }
inline char *memdup(const char *str, uint size)
{ return memdup_root(mem_root,str,size); }
inline char *memdup_w_gap(const char *str, uint size, uint gap)
{
gptr ptr;
if ((ptr=alloc_root(mem_root,size+gap)))
memcpy(ptr,str,size);
return ptr;
}
void set_query_arena(Query_arena *set);
void free_items();
/* Close the active state associated with execution of this statement */
virtual void cleanup_stmt();
};
class Server_side_cursor;
/**
@class Statement
@brief State of a single command executed against this connection.
One connection can contain many simultaneously running statements,
some of which could be:
- prepared, that is, contain placeholders,
- opened as cursors. We maintain a 1:1 relationship between a
statement and a cursor - if the user wants to create another cursor for
the same query, we create another statement for it.
To perform some action with a statement, we reset the THD part to the
state of that statement, do the action, and then save the modified state
from the THD back into the statement. This will change in the near
future, and Statement will then be used explicitly.
*/
class Statement: public ilink, public Query_arena
{
Statement(const Statement &rhs); /* not implemented: */
Statement &operator=(const Statement &rhs); /* non-copyable */
public:
/*
Uniquely identifies each statement object in thread scope; change during
statement lifetime. FIXME: must be const
*/
ulong id;
/*
- if set_query_id=1, we set field->query_id for all fields. In that case
field list can not contain duplicates.
*/
bool set_query_id;
LEX_STRING name; /* name for named prepared statements */
LEX *lex; // parse tree descriptor
/*
Points to the query associated with this statement. It's const, but
we need to declare it char * because all table handlers are written
in C and need to point to it.
Note that if we set query = NULL, we must at the same time set
query_length = 0, and protect the whole operation with
LOCK_thd_data mutex. To avoid crashes in races, if we do not
know that thd->query cannot change at the moment, we should print
thd->query like this:
(1) reserve the LOCK_thd_data mutex;
(2) print or copy the value of query and query_length
(3) release LOCK_thd_data mutex.
This printing is needed at least in SHOW PROCESSLIST and SHOW
ENGINE INNODB STATUS.
*/
char *query;
uint32 query_length; // current query length
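/*
  Illustrative sketch of the three-step protocol described above (where
  exactly the LOCK_thd_data mutex lives on the THD is an assumption here):

    pthread_mutex_lock(&thd->LOCK_thd_data);    // (1) reserve the mutex
    const char *q= thd->query;                  // (2) copy or print the value
    uint32 q_len= thd->query_length;
    pthread_mutex_unlock(&thd->LOCK_thd_data);  // (3) release the mutex
*/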
Server_side_cursor *cursor;
public:
/* This constructor is called for backup statements */
Statement() {}
Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg,
enum enum_state state_arg, ulong id_arg);
virtual ~Statement();
/* Assign execution context (note: not all members) of given stmt to self */
virtual void set_statement(Statement *stmt);
void set_n_backup_statement(Statement *stmt, Statement *backup);
void restore_backup_statement(Statement *stmt, Statement *backup);
/* return class type */
virtual Type type() const;
};
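/*
  Illustrative sketch (assumption, not actual server code) of the
  save/restore pattern described in the class comment above, using the
  set_n_backup_statement()/restore_backup_statement() members:

    Statement backup;
    thd->set_n_backup_statement(stmt, &backup);   // THD now holds stmt's state
    // ... perform the action through the THD ...
    thd->restore_backup_statement(stmt, &backup); // save modified state back
*/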
/**
Container for all statements created/used in a connection.
Statements in Statement_map have a unique Statement::id (guaranteed by id
assignment in Statement::Statement).
Non-empty statement names are unique too: an attempt to insert a new
statement with a duplicate name causes the older statement to be deleted.
Statements are auto-deleted when they are removed from the map and when
the map itself is deleted.
(A usage sketch follows the class below.)
*/
class Statement_map
{
public:
Statement_map();
int insert(THD *thd, Statement *statement);
Statement *find_by_name(LEX_STRING *name)
{
Statement *stmt;
stmt= (Statement*)hash_search(&names_hash, (byte*)name->str,
name->length);
return stmt;
}
Statement *find(ulong id)
{
if (last_found_statement == 0 || id != last_found_statement->id)
{
Statement *stmt;
stmt= (Statement *) hash_search(&st_hash, (byte *) &id, sizeof(id));
if (stmt && stmt->name.str)
return NULL;
last_found_statement= stmt;
}
return last_found_statement;
}
/*
Close all cursors of this connection that use tables of a storage
engine that has transaction-specific state and therefore can not
survive COMMIT or ROLLBACK. Currently all but MyISAM cursors are closed.
*/
void close_transient_cursors();
void erase(Statement *statement);
/* Erase all statements (calls Statement destructor) */
void reset();
~Statement_map();
private:
HASH st_hash;
HASH names_hash;
I_List<Statement> transient_cursor_list;
Statement *last_found_statement;
};
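/*
  Illustrative usage sketch (assumption, not actual server code) of the
  container above; note that find() by id returns only unnamed statements,
  as its implementation shows:

    Statement_map stmt_map;
    if (stmt_map.insert(thd, stmt))          // assumed: non-zero on failure
      return;                                // handle the error
    Statement *by_id=   stmt_map.find(stmt->id);
    Statement *by_name= stmt_map.find_by_name(&stmt->name);
    stmt_map.erase(stmt);                    // also deletes the statement
*/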
struct st_savepoint {
struct st_savepoint *prev;
char *name;
uint length, nht;
};
enum xa_states {XA_NOTR=0, XA_ACTIVE, XA_IDLE, XA_PREPARED, XA_ROLLBACK_ONLY};
extern const char *xa_state_names[];
typedef struct st_xid_state {
/* For now, this is only used to catch duplicated external xids */
XID xid; // transaction identifier
enum xa_states xa_state; // used by external XA only
bool in_thd;
/* Error reported by the Resource Manager (RM) to the Transaction Manager. */
uint rm_error;
} XID_STATE;
extern pthread_mutex_t LOCK_xid_cache;
extern HASH xid_cache;
bool xid_cache_init(void);
void xid_cache_free(void);
XID_STATE *xid_cache_search(XID *xid);
bool xid_cache_insert(XID *xid, enum xa_states xa_state);
bool xid_cache_insert(XID_STATE *xid_state);
void xid_cache_delete(XID_STATE *xid_state);
/**
@class Security_context
@brief A set of THD members describing the current authenticated user.
*/
class Security_context {
public:
Security_context() {} /* Remove gcc warning */
/*
host - host of the client
user - user of the client, set to NULL until the user has been read from
the connection
priv_user - The user privilege we are using. May be "" for anonymous user.
ip - client IP
*/
char *host, *user, *priv_user, *ip;
/* The host privilege we are using */
char priv_host[MAX_HOSTNAME];
/* points to host if host is available, otherwise points to ip */
const char *host_or_ip;
ulong master_access; /* Global privileges from mysql.user */
ulong db_access; /* Privileges for current db */
void init();
void destroy();
void skip_grants();
inline char *priv_host_name()
{
return (*priv_host ? priv_host : (char *)"%");
}
bool user_matches(Security_context *);
};
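/*
  Illustrative sketch only, not part of the original header: one way the two
  privilege bitmaps of Security_context described above can be combined when
  checking whether the current user may perform an operation. The function
  name and the exact combination rule are assumptions for illustration.
*/
inline bool example_has_access(Security_context *sctx, ulong want_access)
{
  /* global privileges (mysql.user) or current-db grants may satisfy it */
  return ((sctx->master_access | sctx->db_access) & want_access) == want_access;
}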
/**
A registry for item tree transformations performed during
query optimization. We register only those changes which require
a rollback to re-execute a prepared statement or stored procedure
yet another time.
*/
struct Item_change_record;
typedef I_List<Item_change_record> Item_change_list;
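/*
  Illustrative sketch only, not part of the original header: the idea behind
  the registry. Each record remembers where in the item tree a replacement
  was made and which item used to be there, so that re-execution of a PS/SP
  can undo the change. The struct and function below are hypothetical
  simplifications of Item_change_record and THD::rollback_item_tree_changes().
*/
struct Example_item_change
{
  Item **place;       /* location in the tree where the new item was stored */
  Item *old_value;    /* the item that was there before the transformation */
};

inline void example_rollback_item_change(Example_item_change *change)
{
  *change->place= change->old_value;   /* restore the original subtree */
}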
/**
Type of prelocked mode.
See comment for THD::prelocked_mode for complete description.
*/
enum prelocked_mode_type {NON_PRELOCKED= 0, PRELOCKED= 1,
PRELOCKED_UNDER_LOCK_TABLES= 2};
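/*
  Illustrative helpers only, not part of the original header: the enum above
  acts as a tri-state flag, so "is any prelocked mode active" is a plain
  comparison against NON_PRELOCKED. Function names are hypothetical.
*/
inline bool example_in_prelocked_mode(prelocked_mode_type mode)
{
  return mode != NON_PRELOCKED;
}
inline bool example_prelocked_under_lock_tables(prelocked_mode_type mode)
{
  /* prelocking that was entered while LOCK TABLES was already in effect */
  return mode == PRELOCKED_UNDER_LOCK_TABLES;
}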
/**
Class that holds information about tables which were opened and locked
by the thread. It is also used to save/restore this information in
push_open_tables_state()/pop_open_tables_state().
*/
class Open_tables_state
{
public:
/**
List of regular tables in use by this thread. Contains temporary and
base tables that were opened with @see open_tables().
*/
TABLE *open_tables;
/**
List of temporary tables used by this thread. Contains user-level
temporary tables, created with CREATE TEMPORARY TABLE, and
internal temporary tables, created, e.g., to resolve a SELECT,
or for an intermediate table used in ALTER.
XXX Why are internal temporary tables added to this list?
*/
TABLE *temporary_tables;
/**
List of tables that were opened with HANDLER OPEN and are
still in use by this thread.
*/
TABLE *handler_tables;
TABLE *derived_tables;
/*
During a MySQL session, one can lock tables in two modes: automatic
or manual. In automatic mode all necessary tables are locked just before
statement execution, and all acquired locks are stored in 'lock'
member. Unlocking takes place automatically as well, when the
statement ends.
Manual mode comes into play when a user issues a 'LOCK TABLES'
statement. In this mode the user can only use the locked tables.
Trying to use any other tables will give an error. The locked tables are
stored in 'locked_tables' member. Manual locking is described in
the 'LOCK_TABLES' chapter of the MySQL manual.
See also lock_tables() for details.
*/
MYSQL_LOCK *lock;
/*
Tables that were locked with explicit or implicit LOCK TABLES.
(Implicit LOCK TABLES happens when we are prelocking tables for
execution of a statement which uses stored routines. See the description
of THD::prelocked_mode for more info.)
*/
MYSQL_LOCK *locked_tables;
/*
prelocked_mode_type enum and prelocked_mode member are used for
indicating whether "prelocked mode" is on, and which type of
"prelocked mode" it is.
Prelocked mode is used for execution of queries which explicitly
or implicitly (via views or triggers) use functions, thus may need
some additional tables (mentioned in query table list) for their
execution.
The first open_tables() call for such a query will analyse all functions
used by it and add all additional tables to its table list. It will
also mark this query as requiring prelocking. After that lock_tables()
will issue implicit LOCK TABLES for the whole table list and change
thd::prelocked_mode to non-0. All queries called in functions invoked
by the main query will use prelocked tables. Non-0 prelocked_mode
will also suppress this analysis in those queries, thus saving
cycles. Prelocked mode is turned off once close_thread_tables()
for the main query is called.
Note: Since not all "tables" present in table list are really locked
thd::prelocked_mode does not imply thd::locked_tables.
*/
prelocked_mode_type prelocked_mode;
ulong version;
uint current_tablenr;
/*
This constructor serves for creation of Open_tables_state instances
which are used as backup storage.
*/
Open_tables_state() {}
Open_tables_state(ulong version_arg);
void set_open_tables_state(Open_tables_state *state)
{
*this= *state;
}
void reset_open_tables_state()
{
open_tables= temporary_tables= handler_tables= derived_tables= 0;
lock= locked_tables= 0;
prelocked_mode= NON_PRELOCKED;
}
};
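/*
  Illustrative sketch only, not part of the original header: the
  save-and-reset step that push_open_tables_state()/pop_open_tables_state()
  are built around, expressed with the two helpers declared above. The
  function name is hypothetical.
*/
inline void example_backup_open_tables_state(Open_tables_state *active,
                                             Open_tables_state *backup)
{
  backup->set_open_tables_state(active); /* keep a copy of the current state */
  active->reset_open_tables_state();     /* continue from a clean slate */
}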
/**
@class Sub_statement_state
@brief Used to save context when executing a function or trigger
*/
/* Defines used for Sub_statement_state::in_sub_stmt */
#define SUB_STMT_TRIGGER 1
#define SUB_STMT_FUNCTION 2
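/*
  Illustrative helper only, not part of the original header: the defines
  above are bit flags kept in THD::in_sub_stmt (and saved into
  Sub_statement_state::in_sub_stmt), so membership tests are plain bit
  operations. The function name is hypothetical.
*/
inline bool example_inside_trigger(uint in_sub_stmt)
{
  return (in_sub_stmt & SUB_STMT_TRIGGER) != 0;
}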
class Sub_statement_state
{
public:
ulonglong options;
ulonglong last_insert_id, next_insert_id, current_insert_id;
ulonglong limit_found_rows;
ha_rows cuted_fields, sent_row_count, examined_row_count;
ulong client_capabilities;
uint in_sub_stmt;
bool enable_slow_log, insert_id_used, clear_next_insert_id;
bool last_insert_id_used;
my_bool no_send_ok;
SAVEPOINT *savepoints;
};
/**
This class represents the interface for internal error handlers.
Internal error handlers are exception handlers used by the server
implementation.
*/
class Internal_error_handler
{
protected:
Internal_error_handler() {}
virtual ~Internal_error_handler() {}
public:
/**
Handle an error condition.
This method can be implemented by a subclass to achieve any of the
following:
- mask an error internally, prevent exposing it to the user,
- mask an error and throw another one instead.
When this method returns true, the error condition is considered
'handled', and will not be propagated to upper layers.
It is the responsibility of the code installing an internal handler
to then check for trapped conditions, and implement logic to recover
from the anticipated conditions trapped during runtime.
This mechanism is similar to C++ try/throw/catch:
- 'try' corresponds to <code>THD::push_internal_handler()</code>,
- 'throw' corresponds to <code>my_error()</code>,
which invokes <code>my_message_sql()</code>,
- 'catch' corresponds to checking how/if an internal handler was invoked,
before removing it from the exception stack with
<code>THD::pop_internal_handler()</code>.
@param sql_errno the error number
@param level the error level
@param thd the calling thread
@return true if the error is handled
*/
virtual bool handle_error(uint sql_errno,
MYSQL_ERROR::enum_warning_level level,
THD *thd) = 0;
};
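/*
  Illustrative sketch only, not part of the original header: a minimal
  handler that masks ER_NO_SUCH_TABLE, in the spirit of the prelocking
  error handling described above. The class name is hypothetical; the
  push/pop calls in the trailing comment refer to the THD methods named
  in the 'try'/'throw'/'catch' analogy above.
*/
class Example_ignore_missing_table_handler : public Internal_error_handler
{
public:
  Example_ignore_missing_table_handler() : m_trapped(FALSE) {}
  virtual bool handle_error(uint sql_errno,
                            MYSQL_ERROR::enum_warning_level level,
                            THD *thd)
  {
    if (sql_errno == ER_NO_SUCH_TABLE)
    {
      m_trapped= TRUE;              /* remember the trapped condition */
      return TRUE;                  /* mask it from upper layers */
    }
    return FALSE;                   /* let every other error propagate */
  }
  bool was_trapped() const { return m_trapped; }
private:
  bool m_trapped;
};
/*
  Typical call pattern:
    thd->push_internal_handler(&handler);     // 'try'
    ... code that may call my_error() ...     // 'throw'
    thd->pop_internal_handler();
    if (handler.was_trapped()) ...            // 'catch': recover
*/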
/**
@class THD
For each client connection we create a separate thread with THD serving as
a thread/connection descriptor
*/
class THD :public Statement,
public Open_tables_state
{
public:
/*
Constant for THD::where initialization in the beginning of every query.
It's needed because we do not save/restore THD::where normally during
primary (non subselect) query execution.
*/
static const char * const DEFAULT_WHERE;
#ifdef EMBEDDED_LIBRARY
struct st_mysql *mysql;
unsigned long client_stmt_id;
unsigned long client_param_count;
struct st_mysql_bind *client_params;
char *extra_data;
ulong extra_length;
struct st_mysql_data *cur_data;
struct st_mysql_data *first_data;
struct st_mysql_data **data_tail;
void clear_data_list();
struct st_mysql_data *alloc_new_dataset();
/*
In embedded server it points to the statement that is processed
in the current query. We store some results directly in statement
fields then.
*/
struct st_mysql_stmt *current_stmt;
#endif
NET net; // client connection descriptor
MEM_ROOT warn_root; // For warnings and errors
Protocol *protocol; // Current protocol
Protocol_simple protocol_simple; // Normal protocol
Protocol_prep protocol_prep; // Binary protocol
HASH user_vars; // hash for user variables
String packet; // dynamic buffer for network I/O
String convert_buffer; // buffer for charset conversions
struct sockaddr_in remote; // client socket address
struct rand_struct rand; // used for authentication
struct system_variables variables; // Changeable local variables
struct system_status_var status_var; // Per thread statistic vars
THR_LOCK_INFO lock_info; // Locking info of this thread
THR_LOCK_OWNER main_lock_id; // To use for conventional queries
THR_LOCK_OWNER *lock_id; // If not main_lock_id, points to
// the lock_id of a cursor.
/**
Protects THD data accessed from other threads:
- thd->query and thd->query_length (used by SHOW ENGINE
INNODB STATUS and SHOW PROCESSLIST),
- thd->mysys_var (used by KILL statement and shutdown).
Is locked when THD is deleted.
*/
pthread_mutex_t LOCK_thd_data;
/* all prepared statements and cursors of this connection */
Statement_map stmt_map;
/*
A pointer to the stack frame of handle_one_connection(),
which is called first in the thread for handling a client
*/
char *thread_stack;
/*
db - currently selected database
catalog - currently selected catalog
WARNING: some members of THD (currently 'db', 'catalog' and 'query') are
set and alloced by the slave SQL thread (for the THD of that thread); that
thread is (and must remain, for now) the only one responsible for freeing these
3 members. If you add members here, and you add code to set them in
replication, don't forget to free_them_and_set_them_to_0 in replication
properly. For details see the 'err:' label of the handle_slave_sql()
in sql/slave.cc.
*/
char *db, *catalog;
Security_context main_security_ctx;
Security_context *security_ctx;
/* remote (peer) port */
uint16 peer_port;
/*
Points to info-string that we show in SHOW PROCESSLIST
You are supposed to update thd->proc_info only if you have coded
a time-consuming piece that MySQL can get stuck in for a long time.
Set it using the thd_proc_info(THD *thread, const char *message)
macro/function.
*/
const char *proc_info;
ulong client_capabilities; /* What the client supports */
ulong max_client_packet_length;
HASH handler_tables_hash;
/*
One thread can hold up to one named user-level lock. This variable
points to a lock object if the lock is present. See item_func.cc and
chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK.
*/
User_level_lock *ull;
#ifndef DBUG_OFF
uint dbug_sentry; // watch out for memory corruption
#endif
struct st_my_thread_var *mysys_var;
/*
Type of current query: COM_STMT_PREPARE, COM_QUERY, etc. Set from
first byte of the packet in do_command()
*/
enum enum_server_command command;
uint32 server_id;
uint32 file_id; // for LOAD DATA INFILE
/*
Used in error messages to tell the user in which part of MySQL we found an
error. E.g. when where= "having clause", if fix_fields() fails, the user
will know that the error was in the having clause.
*/
const char *where;
time_t start_time,time_after_lock,user_time;
time_t connect_time,thr_create_time; // track down slow pthread_create
thr_lock_type update_lock_default;
Delayed_insert *di;
/* <> 0 if we are inside of trigger or stored function. */
uint in_sub_stmt;
/* container for handler's private per-connection data */
void *ha_data[MAX_HA];
struct st_transactions {
SAVEPOINT *savepoints;
THD_TRANS all; // Trans since BEGIN WORK
THD_TRANS stmt; // Trans for current statement
bool on; // see ha_enable_transaction()
XID_STATE xid_state;
/*
Tables changed in the transaction (that must be invalidated in the query
cache). The list contains only transactional tables that have not been
invalidated in the query cache yet (instead of the full list of tables
changed in the transaction).
*/
CHANGED_TABLE_LIST* changed_tables;
MEM_ROOT mem_root; // Transaction-life memory allocation pool
void cleanup()
{
changed_tables= 0;
savepoints= 0;
#ifdef USING_TRANSACTIONS
free_root(&mem_root,MYF(MY_KEEP_PREALLOC));
#endif
}
st_transactions()
{
#ifdef USING_TRANSACTIONS
bzero((char*)this, sizeof(*this));
xid_state.xid.null();
init_sql_alloc(&mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
#else
xid_state.xa_state= XA_NOTR;
#endif
}
} transaction;
Field *dupp_field;
#ifndef __WIN__
sigset_t signals,block_signals;
#endif
#ifdef SIGNAL_WITH_VIO_CLOSE
Vio* active_vio;
#endif
/*
This is to track items changed during execution of a prepared
statement/stored procedure. It's created by
register_item_tree_change() in memory root of THD, and freed in
rollback_item_tree_changes(). For conventional execution it's always
empty.
*/
Item_change_list change_list;
/*
A permanent memory area of the statement. For conventional
execution, the parsed tree and execution runtime reside in the same
memory root. In this case stmt_arena points to THD. In case of
a prepared statement or a stored procedure statement, thd->mem_root
conventionally points to runtime memory, and thd->stmt_arena
points to the memory of the PS/SP, where the parsed tree of the
statement resides. Whenever you need to perform a permanent
transformation of a parsed tree, you should allocate new memory in
stmt_arena, to allow correct re-execution of PS/SP.
Note: in the parser, stmt_arena == thd, even for PS/SP.
*/
Query_arena *stmt_arena;
/*
Map of the tables that will be updated by a multi-table UPDATE
statement; for other statements this will be zero.
*/
table_map table_map_for_update;
/*
next_insert_id is set on SET INSERT_ID= #. This is used as the next
generated auto_increment value in handler.cc
*/
ulonglong next_insert_id;
/* Remember last next_insert_id to reset it if something went wrong */
ulonglong prev_insert_id;
/*
At the beginning of the statement last_insert_id holds the first
generated value of the previous statement. During statement
execution it is updated to the value just generated, but then
restored to the value that was generated first, so for the next
statement it will again be "the first generated value of the
previous statement".
It may also be set with "LAST_INSERT_ID(expr)" or
"@@LAST_INSERT_ID= expr", but the effect of such setting will be
seen only in the next statement.
*/
ulonglong last_insert_id;
/*
current_insert_id remembers the first generated value of the
previous statement, and does not change during statement
execution. Its value is returned by LAST_INSERT_ID() and
@@LAST_INSERT_ID.
*/
ulonglong current_insert_id;
ulonglong limit_found_rows;
ulonglong options; /* Bitmap of states */
longlong row_count_func; /* For the ROW_COUNT() function */
ha_rows cuted_fields;
/*
number of rows we actually sent to the client, including "synthetic"
rows in ROLLUP etc.
*/
ha_rows sent_row_count;
/*
number of rows we read, sent or not, including in create_sort_index()
*/
ha_rows examined_row_count;
/*
The set of those tables whose fields are referenced in all subqueries
of the query.
TODO: possibly it is incorrect to have used tables in THD because
with more than one subquery, it is not clear what the field means.
*/
table_map used_tables;
USER_CONN *user_connect;
CHARSET_INFO *db_charset;
/*
FIXME: this, and some other variables like 'count_cuted_fields'
should perhaps be statement/cursor local, that is, moved to the Statement
class. With the current implementation, warnings produced in each prepared
statement/cursor settle here.
*/
List <MYSQL_ERROR> warn_list;
uint warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_END];
uint total_warn_count;
#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
PROFILING profiling;
#endif
/*
Id of the current query. A statement can be reused to execute several
queries; query_id is global in the context of the whole MySQL server.
The ID is automatically generated from a mutex-protected counter.
It's used in handler code for various purposes: to check which columns
from the table are necessary for this select, and to check if it's necessary to
update auto-updatable fields (like auto_increment and timestamp).
*/
query_id_t query_id, warn_id;
ulong thread_id, col_access;
/* Statement id is thread-wide. This counter is used to generate ids */
ulong statement_id_counter;
ulong rand_saved_seed1, rand_saved_seed2;
/*
Row counter, mainly for errors and warnings. Not increased in
create_sort_index(); may differ from examined_row_count.
*/
ulong row_count;
long dbug_thread_id;
pthread_t real_id;
uint tmp_table, global_read_lock;
uint server_status,open_options,system_thread;
uint db_length;
uint select_number; //number of select (used for EXPLAIN)
/* variables.transaction_isolation is reset to this after each commit */
enum_tx_isolation session_tx_isolation;
enum_check_fields count_cuted_fields;
DYNAMIC_ARRAY user_var_events; /* For user variables replication */
MEM_ROOT *user_var_events_alloc; /* Allocate above array elements here */
enum killed_state
{
NOT_KILLED=0,
KILL_BAD_DATA=1,
KILL_CONNECTION=ER_SERVER_SHUTDOWN,
KILL_QUERY=ER_QUERY_INTERRUPTED,
KILLED_NO_VALUE /* means neither of the states */
};
killed_state volatile killed;
/* scramble - random string sent to client on handshake */
char scramble[SCRAMBLE_LENGTH+1];
bool slave_thread, one_shot_set;
bool locked, some_tables_deleted;
bool last_cuted_field;
bool no_errors, password;
/**
Set to TRUE if execution of the current compound statement
can not continue. In particular, disables activation of
CONTINUE or EXIT handlers of stored routines.
Reset at the end of processing of the current user request, in
@see mysql_reset_thd_for_next_command().
*/
bool is_fatal_error;
/**
Set by a storage engine to request the entire
transaction (that possibly spans multiple engines) to
rollback. Reset in ha_rollback.
*/
bool transaction_rollback_request;
/**
TRUE if we are in a sub-statement and the current error can
not be safely recovered until we leave the sub-statement mode.
In particular, disables activation of CONTINUE and EXIT
handlers inside sub-statements. E.g. if it is a deadlock
error and requires a transaction-wide rollback, this flag is
raised (traditionally, MySQL first has to close all the reads
via @see handler::ha_index_or_rnd_end() and only then perform
the rollback).
Reset to FALSE when we leave the sub-statement mode.
*/
bool is_fatal_sub_stmt_error;
bool query_start_used, rand_used, time_zone_used;
/*
last_insert_id_used is set when current statement calls
LAST_INSERT_ID() or reads @@LAST_INSERT_ID.
*/
bool last_insert_id_used;
/*
last_insert_id_used_bin_log is set when the current statement or any
stored function called from this statement calls LAST_INSERT_ID() or
reads @@LAST_INSERT_ID, so that a binary log LAST_INSERT_ID_EVENT is
generated. Required for the statement-based binary log for issuing
"SET LAST_INSERT_ID= #" before "SELECT func()", if func() reads
LAST_INSERT_ID.
*/
bool last_insert_id_used_bin_log;
/*
insert_id_used is set when the current statement updates
THD::last_insert_id, so that a binary log INSERT_ID_EVENT is
generated.
*/
bool insert_id_used;
/*
clear_next_insert_id is set if engine was called at least once
for this statement to generate auto_increment value.
*/
bool clear_next_insert_id;
/* for IS NULL => = last_insert_id() fix in remove_eq_conds() */
bool substitute_null_with_insert_id;
bool in_lock_tables;
bool query_error, bootstrap, cleanup_done;
bool tmp_table_used;
/** Set if some thread-specific value(s) are used in a statement. */
bool thread_specific_used;
bool charset_is_system_charset, charset_is_collation_connection;
bool charset_is_character_set_filesystem;
bool enable_slow_log; /* enable slow log for current statement */
bool abort_on_warning;
bool got_warning; /* Set on call to push_warning() */
bool no_warnings_for_error; /* no warnings on call to my_error() */
/* set during loop of derived table processing */
bool derived_tables_processing;
my_bool tablespace_op; /* This is TRUE in DISCARD/IMPORT TABLESPACE */
sp_rcontext *spcont; // SP runtime context
sp_cache *sp_proc_cache;
sp_cache *sp_func_cache;
/** number of name_const() substitutions, see sp_head.cc:subst_spvars() */
uint query_name_consts;
/*
If we do a purge of binary logs, the log index info of the threads
that are currently reading it needs to be adjusted. To do that,
each thread that is using LOG_INFO needs to adjust the pointer to it.
*/
LOG_INFO* current_linfo;
NET* slave_net; // network connection from slave -> m.
/* Used by the sys_var class to store temporary values */
union
{
my_bool my_bool_value;
long long_value;
ulong ulong_value;
} sys_var_tmp;
struct {
/*
If true, mysql_bin_log::write(Log_event) calls will not write events to
the binlog, but will maintain the two variables below instead (use
mysql_bin_log.start_union_events to turn this on).
*/
bool do_union;
/*
If TRUE, at least one mysql_bin_log::write(Log_event) call has been
made after the last mysql_bin_log.start_union_events() call.
*/
bool unioned_events;
/*
If TRUE, at least one mysql_bin_log::write(Log_event e) call with
e.cache_stmt == TRUE has been made after the last
mysql_bin_log.start_union_events() call.
*/
bool unioned_events_trans;
/*
'queries' (actually SP statements) that run inside this binlog
union have thd->query_id >= first_query_id.
*/
query_id_t first_query_id;
} binlog_evt_union;
/**
Internal parser state.
Note that since the parser is not re-entrant, we keep only one parser
state here. This member is valid only when executing code during parsing.
*/
Parser_state *m_parser_state;
THD();
~THD();
void init(void);
/*
Initialize the memory roots necessary for query processing and (!)
pre-allocate memory for them. We can't do that in the THD constructor
because there are use cases (acl_init, delayed inserts, watcher threads,
killing mysqld) where it is vital not to allocate excessive, unused
memory. Note that we still don't return an error from init_for_queries():
if preallocation fails, we should notice that at the first call to
alloc_root.
*/
void init_for_queries();
void change_user(void);
void cleanup(void);
void cleanup_after_query();
bool store_globals();
#ifdef SIGNAL_WITH_VIO_CLOSE
inline void set_active_vio(Vio* vio)
{
pthread_mutex_lock(&LOCK_thd_data);
active_vio = vio;
pthread_mutex_unlock(&LOCK_thd_data);
}
inline void clear_active_vio()
{
pthread_mutex_lock(&LOCK_thd_data);
active_vio = 0;
pthread_mutex_unlock(&LOCK_thd_data);
}
void close_active_vio();
#endif
void awake(THD::killed_state state_to_set);
/*
For enter_cond() / exit_cond() to work the mutex must be acquired before
calling enter_cond(); this mutex is then released by exit_cond().
Usage must be: lock mutex; enter_cond(); your code; exit_cond().
*/
inline const char* enter_cond(pthread_cond_t *cond, pthread_mutex_t* mutex,
const char* msg)
{
const char* old_msg = proc_info;
safe_mutex_assert_owner(mutex);
mysys_var->current_mutex = mutex;
mysys_var->current_cond = cond;
proc_info = msg;
return old_msg;
}
inline void exit_cond(const char* old_msg)
{
/*
Putting the mutex unlock in exit_cond() ensures that
mysys_var->current_mutex is always unlocked _before_ mysys_var->mutex is
locked (if that would not be the case, you'll get a deadlock if someone
does a THD::awake() on you).
*/
pthread_mutex_unlock(mysys_var->current_mutex);
pthread_mutex_lock(&mysys_var->mutex);
mysys_var->current_mutex = 0;
mysys_var->current_cond = 0;
proc_info = old_msg;
pthread_mutex_unlock(&mysys_var->mutex);
}
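/*
Illustrative sketch (not part of this header) of the protocol described
above; 'cond', 'mutex' and 'work_available' are hypothetical names:

  pthread_mutex_lock(&mutex);
  const char *old_msg= thd->enter_cond(&cond, &mutex, "Waiting for work");
  while (!work_available && !thd->killed)
    pthread_cond_wait(&cond, &mutex);
  thd->exit_cond(old_msg);              // note: also unlocks 'mutex'
*/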
static inline void safe_time(time_t *t)
{
/**
Wrapper around time() which retries on error (-1)
@details
This is needed because, despite the documentation, time() may fail
in some circumstances. Here we retry time() until it succeeds, and
log the failure so that performance problems related to this can be
identified.
*/
while(unlikely(time(t) == ((time_t) -1)))
sql_print_information("time() failed with %d", errno);
}
inline time_t query_start() { query_start_used=1; return start_time; }
inline void set_time()
{
  if (user_time)
    start_time= time_after_lock= user_time;
  else
  {
    safe_time(&start_time);
    time_after_lock= start_time;
  }
}
inline void end_time() { safe_time(&start_time); }
inline void set_time(time_t t) { time_after_lock=start_time=user_time=t; }
inline void lock_time() { safe_time(&time_after_lock); }
/*TODO: this will be obsolete when we have support for 64 bit my_time_t */
inline bool is_valid_time()
{
return (start_time < (time_t) MY_TIME_T_MAX);
}
inline void insert_id(ulonglong id_arg)
{
last_insert_id= id_arg;
insert_id_used=1;
substitute_null_with_insert_id= TRUE;
}
inline ulonglong found_rows(void)
{
return limit_found_rows;
}
inline bool active_transaction()
{
#ifdef USING_TRANSACTIONS
return server_status & SERVER_STATUS_IN_TRANS;
#else
return 0;
#endif
}
inline bool fill_derived_tables()
{
return !stmt_arena->is_stmt_prepare() && !lex->only_view_structure();
}
inline gptr trans_alloc(unsigned int size)
{
return alloc_root(&transaction.mem_root,size);
}
bool convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
const char *from, uint from_length,
CHARSET_INFO *from_cs);
bool convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs);
void add_changed_table(TABLE *table);
void add_changed_table(const char *key, long key_length);
CHANGED_TABLE_LIST * changed_table_dup(const char *key, long key_length);
int send_explain_fields(select_result *result);
#ifndef EMBEDDED_LIBRARY
inline void clear_error()
{
net.last_error[0]= 0;
net.last_errno= 0;
net.report_error= 0;
query_error= 0;
}
inline bool vio_ok() const { return net.vio != 0; }
#else
void clear_error();
inline bool vio_ok() const { return true; }
#endif
inline void fatal_error()
{
is_fatal_error= 1;
net.report_error= 1;
DBUG_PRINT("error",("Fatal error set"));
}
inline CHARSET_INFO *charset() { return variables.character_set_client; }
void update_charset();
inline Query_arena *activate_stmt_arena_if_needed(Query_arena *backup)
{
/*
Use the persistent arena if we are in a prepared statement or a stored
procedure statement and we have not already changed to use this arena.
*/
if (!stmt_arena->is_conventional() && mem_root != stmt_arena->mem_root)
{
set_n_backup_active_arena(stmt_arena, backup);
return stmt_arena;
}
return 0;
}
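/*
Typical use of activate_stmt_arena_if_needed() (sketch, not part of this
header): switch to the statement arena before allocating objects that
must survive until the next execution of a prepared statement, then
switch back:

  Query_arena backup, *arena= thd->activate_stmt_arena_if_needed(&backup);
  ... allocate Items / structures on the now-active statement arena ...
  if (arena)
    thd->restore_active_arena(arena, &backup);
*/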
void change_item_tree(Item **place, Item *new_value)
{
/* TODO: check for OOM condition here */
if (!stmt_arena->is_conventional())
nocheck_register_item_tree_change(place, *place, mem_root);
*place= new_value;
}
void nocheck_register_item_tree_change(Item **place, Item *old_value,
MEM_ROOT *runtime_memroot);
void rollback_item_tree_changes();
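/*
Sketch of how change_item_tree() and rollback_item_tree_changes() pair up
(illustrative only; 'ref' and 'new_item' are hypothetical names):

  thd->change_item_tree(&ref->item, new_item);  // registered unless conventional
  ...
  thd->rollback_item_tree_changes();            // undo at statement cleanup
*/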
/*
Cleanup statement parse state (parse tree, lex) and execution
state after execution of a non-prepared SQL statement.
*/
void end_statement();
inline int killed_errno() const
{
killed_state killed_val; /* to cache the volatile 'killed' */
return (killed_val= killed) != KILL_BAD_DATA ? killed_val : 0;
}
inline void send_kill_message() const
{
int err= killed_errno();
if (err)
{
if ((err == KILL_CONNECTION) && !shutdown_in_progress)
err = KILL_QUERY;
my_message(err, ER(err), MYF(0));
}
}
/* Return TRUE if we will abort the query if we raise a warning now */
inline bool really_abort_on_warning()
{
return (abort_on_warning &&
(!transaction.stmt.modified_non_trans_table ||
(variables.sql_mode & MODE_STRICT_ALL_TABLES)));
}
void set_status_var_init();
bool is_context_analysis_only()
{ return stmt_arena->is_stmt_prepare() || lex->view_prepare_mode; }
void reset_n_backup_open_tables_state(Open_tables_state *backup);
void restore_backup_open_tables_state(Open_tables_state *backup);
void reset_sub_statement_state(Sub_statement_state *backup, uint new_state);
void restore_sub_statement_state(Sub_statement_state *backup);
void set_n_backup_active_arena(Query_arena *set, Query_arena *backup);
void restore_active_arena(Query_arena *set, Query_arena *backup);
/*
Initialize the current database from a NULL-terminated string with length.
If we run out of memory, we free the current database and return TRUE.
This way the user will notice the error as there will be no current
database selected (in addition to the error message set by malloc).
*/
bool set_db(const char *new_db, uint new_db_len)
{
/* Do not reallocate memory if current chunk is big enough. */
if (db && new_db && db_length >= new_db_len)
memcpy(db, new_db, new_db_len+1);
else
{
x_free(db);
db= new_db ? my_strdup_with_length(new_db, new_db_len, MYF(MY_WME)) :
NULL;
}
db_length= db ? new_db_len : 0;
return new_db && !db;
}
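/*
Usage sketch for set_db() (not part of this header); 'new_db_name' is a
hypothetical NUL-terminated string:

  if (thd->set_db(new_db_name, strlen(new_db_name)))  // deep copy, TRUE on OOM
    return TRUE;
*/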
void reset_db(char *new_db, uint new_db_len)
{
db= new_db;
db_length= new_db_len;
}
/*
Copy the current database to the argument. Use the current arena to
allocate memory for a deep copy: current database may be freed after
a statement is parsed but before it's executed.
*/
bool copy_db_to(char **p_db, uint *p_db_length)
{
if (db == NULL)
{
my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
return TRUE;
}
*p_db= strmake(db, db_length);
if (p_db_length)
*p_db_length= db_length;
return FALSE;
}
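/*
Usage sketch for copy_db_to() (not part of this header): take a deep copy
of the current database name before it can be freed under our feet:

  char *db_name; uint db_name_length;
  if (thd->copy_db_to(&db_name, &db_name_length))
    return TRUE;                  // no current database, error already sent
*/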
public:
/**
Add an internal error handler to the thread execution context.
@param handler the exception handler to add
*/
void push_internal_handler(Internal_error_handler *handler);
/**
Handle an error condition.
@param sql_errno the error number
@param level the error level
@return true if the error is handled
*/
virtual bool handle_error(uint sql_errno,
MYSQL_ERROR::enum_warning_level level);
/**
Remove the error handler last pushed.
*/
void pop_internal_handler();
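/*
Usage sketch (not part of this header); 'Ignore_no_such_table' is a
hypothetical Internal_error_handler subclass that swallows
ER_NO_SUCH_TABLE in its handle_error() method:

  Ignore_no_such_table handler;
  thd->push_internal_handler(&handler);
  ... open tables; errors are offered to 'handler' first ...
  thd->pop_internal_handler();
*/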
/** Overloaded to guard query/query_length fields */
virtual void set_statement(Statement *stmt);
/**
Assign a new value to thd->query.
Protected with LOCK_thd_data mutex.
*/
void set_query(char *query_arg, uint32 query_length_arg);
private:
/** The current internal error handler for this thread, or NULL. */
Internal_error_handler *m_internal_handler;
/**
The lex to hold the parsed tree of conventional (non-prepared) queries.
Whereas for prepared and stored procedure statements we use a separate lex
instance for each new query, for conventional statements we reuse
the same lex. (@see mysql_parse for details).
*/
LEX main_lex;
/**
This memory root is used for two purposes:
- for conventional queries, to allocate structures stored in main_lex
during parsing, and allocate runtime data (execution plan, etc.)
during execution.
- for prepared queries, only to allocate runtime data. The parsed
tree itself is reused between executions and thus is stored elsewhere.
*/
MEM_ROOT main_mem_root;
};
#define tmp_disable_binlog(A) \
{ulonglong tmp_disable_binlog__save_options= (A)->options; \
(A)->options&= ~OPTION_BIN_LOG
#define reenable_binlog(A) (A)->options= tmp_disable_binlog__save_options;}
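/*
Usage sketch for the macro pair above (illustrative only;
'write_internal_rows' is a hypothetical helper). Note that
tmp_disable_binlog() opens a block which reenable_binlog() closes,
so the two must always be paired in the same scope:

  tmp_disable_binlog(thd);
  error= write_internal_rows(thd);   // not written to the binary log
  reenable_binlog(thd);
*/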
/* Flags for the THD::system_thread (bitmap) variable */
#define SYSTEM_THREAD_DELAYED_INSERT 1
#define SYSTEM_THREAD_SLAVE_IO 2
#define SYSTEM_THREAD_SLAVE_SQL 4
/*
Used to hold information about file and file structure in exchange
via non-DB file (...INTO OUTFILE..., ...LOAD DATA...)
XXX: We never call destructor for objects of this class.
*/
class sql_exchange :public Sql_alloc
{
public:
char *file_name;
String *field_term,*enclosed,*line_term,*line_start,*escaped;
bool opt_enclosed;
bool dumpfile;
ulong skip_lines;
CHARSET_INFO *cs;
sql_exchange(char *name,bool dumpfile_flag);
bool escaped_given(void);
};
#include "log_event.h"
/*
This is used to get result from a select
*/
class JOIN;
class select_result :public Sql_alloc {
protected:
THD *thd;
SELECT_LEX_UNIT *unit;
uint nest_level;
public:
select_result();
virtual ~select_result() {};
virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
unit= u;
return 0;
}
virtual int prepare2(void) { return 0; }
/*
Because of peculiarities of the prepared statements protocol
we need to know the number of columns in the result set (if
there is a result set) separately from sending the column metadata.
*/
virtual uint field_count(List<Item> &fields) const
{ return fields.elements; }
virtual bool send_fields(List<Item> &list, uint flags)=0;
virtual bool send_data(List<Item> &items)=0;
virtual bool initialize_tables (JOIN *join=0) { return 0; }
virtual void send_error(uint errcode,const char *err);
virtual bool send_eof()=0;
/**
Check if this query returns a result set and therefore is allowed in
cursors and set an error message if it is not the case.
@retval FALSE success
@retval TRUE error, an error message is set
*/
virtual bool check_simple_select() const;
virtual void abort() {}
/*
Cleanup instance of this class for next execution of a prepared
statement/stored procedure.
*/
virtual void cleanup();
void set_thd(THD *thd_arg) { thd= thd_arg; }
/**
The nest level, if supported.
@return
-1 if nest level is undefined, otherwise a positive integer.
*/
int get_nest_level() { return nest_level; }
#ifdef EMBEDDED_LIBRARY
virtual void begin_dataset() {}
#else
void begin_dataset() {}
#endif
};
/*
Base class for select_result descendants which intercept and
transform result set rows. As the rows are not sent to the client,
sending of result set metadata should be suppressed as well.
*/
class select_result_interceptor: public select_result
{
public:
select_result_interceptor() {} /* Remove gcc warning */
uint field_count(List<Item> &fields) const { return 0; }
bool send_fields(List<Item> &fields, uint flag) { return FALSE; }
};
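/*
Minimal illustrative subclass (hypothetical, not part of the server):
an interceptor only has to consume rows and signal end-of-data, since
metadata sending is already suppressed by the base class:

  class select_discard :public select_result_interceptor
  {
  public:
    bool send_data(List<Item> &items) { return FALSE; }  // swallow rows
    bool send_eof()                   { return FALSE; }
  };
*/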
class select_send :public select_result {
int status;
public:
select_send() :status(0) {}
bool send_fields(List<Item> &list, uint flags);
bool send_data(List<Item> &items);
bool send_eof();
virtual bool check_simple_select() const { return FALSE; }
void abort();
};
class select_to_file :public select_result_interceptor {
protected:
sql_exchange *exchange;
File file;
IO_CACHE cache;
ha_rows row_count;
char path[FN_REFLEN];
public:
select_to_file(sql_exchange *ex) :exchange(ex), file(-1),row_count(0L)
{ path[0]=0; }
~select_to_file();
void send_error(uint errcode,const char *err);
bool send_eof();
void cleanup();
};
#define ESCAPE_CHARS "ntrb0ZN" // keep in sync with READ_INFO::unescape
/*
List of all possible characters of a numeric value text representation.
*/
#define NUMERIC_CHARS ".0123456789e+-"
class select_export :public select_to_file {
uint field_term_length;
int field_sep_char,escape_char,line_sep_char;
int field_term_char; // first char of FIELDS TERMINATED BY or MAX_INT
/*
The is_ambiguous_field_sep field is true if the value of field_sep_char
is one of the 'n', 't', 'r', etc. characters
(see the READ_INFO::unescape method and the ESCAPE_CHARS constant value).
*/
bool is_ambiguous_field_sep;
/*
The is_ambiguous_field_term is true if field_sep_char contains the first
char of the FIELDS TERMINATED BY (ENCLOSED BY is empty), and items can
contain this character.
*/
bool is_ambiguous_field_term;
/*
The is_unsafe_field_sep field is true if the value of field_sep_char
is one of the '0'..'9', '+', '-', '.' and 'e' characters
(see the NUMERIC_CHARS constant value).
*/
bool is_unsafe_field_sep;
bool fixed_row_size;
public:
/**
Creates a select_export to represent INTO OUTFILE <filename> with a
defined level of subquery nesting.
*/
select_export(sql_exchange *ex, uint nest_level_arg) :select_to_file(ex)
{
nest_level= nest_level_arg;
}
~select_export();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
};
class select_dump :public select_to_file {
public:
/**
Creates a select_dump to represent INTO DUMPFILE <filename> with a
defined level of subquery nesting.
*/
select_dump(sql_exchange *ex, uint nest_level_arg) :
select_to_file(ex)
{
nest_level= nest_level_arg;
}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
};
class select_insert :public select_result_interceptor {
public:
TABLE_LIST *table_list;
TABLE *table;
List<Item> *fields;
ulonglong autoinc_value_of_last_inserted_row; // not autogenerated
ulonglong autoinc_value_of_first_inserted_row; // autogenerated
COPY_INFO info;
bool insert_into_view;
select_insert(TABLE_LIST *table_list_par,
TABLE *table_par, List<Item> *fields_par,
List<Item> *update_fields, List<Item> *update_values,
enum_duplicates duplic, bool ignore);
~select_insert();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
virtual int prepare2(void);
bool send_data(List<Item> &items);
virtual void store_values(List<Item> &values);
void send_error(uint errcode,const char *err);
bool send_eof();
void abort();
/* not implemented: select_insert is never re-used in prepared statements */
void cleanup();
};
class select_create: public select_insert {
ORDER *group;
TABLE_LIST *create_table;
HA_CREATE_INFO *create_info;
Alter_info *alter_info;
MYSQL_LOCK *lock;
Field **field;
public:
select_create(TABLE_LIST *table_arg,
HA_CREATE_INFO *create_info_arg,
Alter_info *alter_info_arg,
List<Item> &select_fields,
enum_duplicates duplic, bool ignore)
:select_insert(NULL, NULL, &select_fields, 0, 0, duplic, ignore),
create_table(table_arg),
create_info(create_info_arg),
alter_info(alter_info_arg),
lock(0)
{}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
void store_values(List<Item> &values);
void send_error(uint errcode,const char *err);
bool send_eof();
void abort();
int prepare2(void) { return 0; }
};
#include <myisam.h>
/*
  Parameters for creating temporary tables when executing a SELECT.

  NOTE
    This structure is copied using memcpy as a part of JOIN.
*/
class TMP_TABLE_PARAM :public Sql_alloc
{
private:
/* Prevent use of these (not safe because of lists and copy_field) */
TMP_TABLE_PARAM(const TMP_TABLE_PARAM &);
void operator=(TMP_TABLE_PARAM &);
public:
List<Item> copy_funcs;
List<Item> save_copy_funcs;
Copy_field *copy_field, *copy_field_end;
Copy_field *save_copy_field, *save_copy_field_end;
byte *group_buff;
Item **items_to_copy; /* Fields in tmp table */
MI_COLUMNDEF *recinfo,*start_recinfo;
KEY *keyinfo;
ha_rows end_write_records;
uint field_count,sum_func_count,func_count;
uint hidden_field_count;
uint group_parts,group_length,group_null_parts;
uint quick_group;
bool using_indirect_summary_function;
/* If >0 convert all blob fields to varchar(convert_blob_length) */
uint convert_blob_length;
CHARSET_INFO *table_charset;
bool schema_table;
/*
True if GROUP BY and its aggregate functions are already computed
by a table access method (e.g. by loose index scan). In this case
query execution should not perform aggregation and should treat
aggregate functions as normal functions.
*/
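  /*
    Illustrative example (not from the original header): for
      SELECT a, MIN(b) FROM t1 GROUP BY a
    resolved by a loose index scan over an index on (a, b), every retrieved
    row already represents a complete group, so this flag is set and MIN(b)
    is treated as an ordinary column value instead of being re-aggregated.
  */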
bool precomputed_group_by;
bool force_copy_fields;
TMP_TABLE_PARAM()
:copy_field(0), group_parts(0),
group_length(0), group_null_parts(0), convert_blob_length(0),
schema_table(0), precomputed_group_by(0), force_copy_fields(0)
{}
~TMP_TABLE_PARAM()
{
cleanup();
}
void init(void);
inline void cleanup(void)
{
if (copy_field) /* Fix for Intel compiler */
{
delete [] copy_field;
save_copy_field= copy_field= 0;
}
}
};
class select_union :public select_result_interceptor
{
TMP_TABLE_PARAM tmp_table_param;
public:
TABLE *table;
select_union() :table(0) {}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
bool send_eof();
bool flush();
bool create_result_table(THD *thd, List<Item> *column_types,
bool is_distinct, ulonglong options,
const char *alias);
};
/* Base subselect interface class */
class select_subselect :public select_result_interceptor
{
protected:
Item_subselect *item;
public:
select_subselect(Item_subselect *item);
bool send_data(List<Item> &items)=0;
bool send_eof() { return 0; };
};
/* Single value subselect interface class */
class select_singlerow_subselect :public select_subselect
{
public:
select_singlerow_subselect(Item_subselect *item_arg)
:select_subselect(item_arg)
{}
bool send_data(List<Item> &items);
};
/* used in independent ALL/ANY optimisation */
class select_max_min_finder_subselect :public select_subselect
{
Item_cache *cache;
bool (select_max_min_finder_subselect::*op)();
bool fmax;
public:
select_max_min_finder_subselect(Item_subselect *item_arg, bool mx)
:select_subselect(item_arg), cache(0), fmax(mx)
{}
void cleanup();
bool send_data(List<Item> &items);
bool cmp_real();
bool cmp_int();
bool cmp_decimal();
bool cmp_str();
};
/* EXISTS subselect interface class */
class select_exists_subselect :public select_subselect
{
public:
select_exists_subselect(Item_subselect *item_arg)
:select_subselect(item_arg){}
bool send_data(List<Item> &items);
};
/* Structs used when sorting */
typedef struct st_sort_field {
Field *field; /* Field to sort */
Item *item; /* Item if not sorting fields */
uint length; /* Length of sort field */
uint suffix_length; /* Length suffix (0-4) */
Item_result result_type; /* Type of item */
bool reverse; /* if descending sort */
bool need_strxnfrm; /* If we have to use strxnfrm() */
} SORT_FIELD;
typedef struct st_sort_buffer {
uint index; /* 0 or 1 */
uint sort_orders;
uint change_pos; /* If sort-fields changed */
char **buff;
SORT_FIELD *sortorder;
} SORT_BUFFER;
/* Structure for db & table in sql_yacc */
class Table_ident :public Sql_alloc
{
public:
LEX_STRING db;
LEX_STRING table;
SELECT_LEX_UNIT *sel;
inline Table_ident(THD *thd, LEX_STRING db_arg, LEX_STRING table_arg,
bool force)
:table(table_arg), sel((SELECT_LEX_UNIT *)0)
{
if (!force && (thd->client_capabilities & CLIENT_NO_SCHEMA))
db.str=0;
else
db= db_arg;
}
inline Table_ident(LEX_STRING table_arg)
:table(table_arg), sel((SELECT_LEX_UNIT *)0)
{
db.str=0;
}
/*
This constructor is used only for the case when we create a derived
table. A derived table has no name and doesn't belong to any database.
Later, if there was an alias specified for the table, it will be set
by add_table_to_list.
*/
inline Table_ident(SELECT_LEX_UNIT *s) : sel(s)
{
/* We must have a table name here as this is used with add_table_to_list */
    db.str= empty_c_string;                    /* subject to casedn_str() */
db.length= 0;
table.str= internal_table_name;
table.length=1;
}
bool is_derived_table() const { return test(sel); }
inline void change_db(char *db_name)
{
db.str= db_name; db.length= (uint) strlen(db_name);
}
};
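/*
  Illustrative summary of the three constructors above (hypothetical call
  sites; db_name, tbl_name and unit are assumed to have been prepared by
  the parser):

    Table_ident *qualified= new Table_ident(thd, db_name, tbl_name, 0); // db.tbl
    Table_ident *plain=     new Table_ident(tbl_name);     // table in current db
    Table_ident *derived=   new Table_ident(unit);         // derived table; real
                                                           // alias is set later by
                                                           // add_table_to_list()
*/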
// this is needed for user_vars hash
class user_var_entry
{
public:
user_var_entry() {} /* Remove gcc warning */
LEX_STRING name;
char *value;
ulong length;
query_id_t update_query_id, used_query_id;
Item_result type;
bool unsigned_flag;
double val_real(my_bool *null_value);
longlong val_int(my_bool *null_value) const;
String *val_str(my_bool *null_value, String *str, uint decimals);
my_decimal *val_decimal(my_bool *null_value, my_decimal *result);
DTCollation collation;
};
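/*
  Illustrative read of a user variable entry (hypothetical caller; looking
  the entry up in the user_vars hash is assumed and not shown here, and
  do_something_with() is a placeholder):

    user_var_entry *entry= ...;          // found in the user_vars hash
    my_bool null_value;
    longlong v= entry->val_int(&null_value);
    if (!null_value)
      do_something_with(v);              // @var held a non-NULL value
*/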
/*
  Unique -- class for removing duplicate values.

  All values are put into a TREE; when the tree grows too big it is dumped
  to a file. The user can request the values in sorted order, or just
  iterate through them. In the latter case the tree merge is performed in
  memory simultaneously with the iteration, so it should be ~2-3x faster.
  See the usage sketch after the class definition below.
*/
class Unique :public Sql_alloc
{
DYNAMIC_ARRAY file_ptrs;
ulong max_elements;
ulonglong max_in_memory_size;
IO_CACHE file;
TREE tree;
byte *record_pointers;
bool flush();
uint size;
public:
ulong elements;
Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
uint size_arg, ulonglong max_in_memory_size_arg);
~Unique();
ulong elements_in_tree() { return tree.elements_in_tree; }
inline bool unique_add(void *ptr)
{
DBUG_ENTER("unique_add");
DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements));
if (tree.elements_in_tree > max_elements && flush())
DBUG_RETURN(1);
DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg));
}
bool get(TABLE *table);
static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
ulonglong max_in_memory_size);
inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size,
ulonglong max_in_memory_size)
{
register ulonglong max_elems_in_tree=
(1 + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
}
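  /*
    Worked example for the calculation above (the 32-byte aligned element
    size is an assumption for illustration only): with key_size= 4,
    max_in_memory_size= 1048576 and ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size)
    == 32, max_elems_in_tree is 1 + 1048576/32 = 32769, so for nkeys= 1000000
    the buffer needs (1 + 1000000/32769)*sizeof(uint) = 31*sizeof(uint) bytes.
  */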
void reset();
bool walk(tree_walk_action action, void *walk_action_arg);
friend int unique_write_to_file(gptr key, element_count count, Unique *unique);
friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique);
};
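/*
  Usage sketch for Unique (illustrative only; the comparison callback, the
  element producer read_next_key() and the walk action print_key() are
  assumptions of this example, not part of the server sources):

    static int cmp_ulonglong(void *arg, const void *a, const void *b)
    {
      ulonglong x= *(const ulonglong*) a, y= *(const ulonglong*) b;
      return x < y ? -1 : (x > y ? 1 : 0);
    }

    Unique uniq(cmp_ulonglong, NULL, sizeof(ulonglong), 1024*1024);
    ulonglong key;
    while (read_next_key(&key))
    {
      if (uniq.unique_add(&key))        // TRUE on flush/insert failure
        return TRUE;
    }
    uniq.walk(print_key, NULL);         // or uniq.get(table) to materialize
*/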
class multi_delete :public select_result_interceptor
{
TABLE_LIST *delete_tables, *table_being_deleted;
Unique **tempfiles;
ha_rows deleted, found;
uint num_of_tables;
int error;
bool do_delete;
/* True if at least one table we delete from is transactional */
bool transactional_tables;
/* True if at least one table we delete from is not transactional */
bool normal_tables;
bool delete_while_scanning;
  /*
    Error handling (rollback and binlogging) can happen in send_eof(),
    so a later send_error() needs to be able to find out whether it has
    already been done.
  */
bool error_handled;
public:
multi_delete(TABLE_LIST *dt, uint num_of_tables);
~multi_delete();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
void send_error(uint errcode,const char *err);
int do_deletes();
bool send_eof();
};
class multi_update :public select_result_interceptor
{
TABLE_LIST *all_tables; /* query/update command tables */
  TABLE_LIST *leaves;            /* list of leaves of the join table tree */
TABLE_LIST *update_tables, *table_being_updated;
TABLE **tmp_tables, *main_table, *table_to_update;
TMP_TABLE_PARAM *tmp_table_param;
ha_rows updated, found;
List <Item> *fields, *values;
List <Item> **fields_for_table, **values_for_table;
uint table_count;
/*
List of tables referenced in the CHECK OPTION condition of
the updated view excluding the updated table.
*/
List <TABLE> unupdated_check_opt_tables;
Copy_field *copy_field;
enum enum_duplicates handle_duplicates;
bool do_update, trans_safe;
/* True if the update operation has made a change in a transactional table */
bool transactional_tables;
bool ignore;
  /*
    Error handling (rollback and binlogging) can happen in send_eof(),
    so a later send_error() needs to be able to find out whether it has
    already been done.
  */
bool error_handled;
public:
multi_update(TABLE_LIST *ut, TABLE_LIST *leaves_list,
List<Item> *fields, List<Item> *values,
enum_duplicates handle_duplicates, bool ignore);
~multi_update();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
bool initialize_tables (JOIN *join);
void send_error(uint errcode,const char *err);
int do_updates (bool from_send_error);
bool send_eof();
};
class my_var : public Sql_alloc {
public:
LEX_STRING s;
#ifndef DBUG_OFF
  /*
    Routine to which this variable belongs. Used for checking that the
    correct runtime context is used for variable handling.
  */
sp_head *sp;
#endif
bool local;
uint offset;
enum_field_types type;
my_var (LEX_STRING& j, bool i, uint o, enum_field_types t)
:s(j), local(i), offset(o), type(t)
{}
~my_var() {}
};
class select_dumpvar :public select_result_interceptor {
ha_rows row_count;
public:
List<my_var> var_list;
/**
Creates a select_dumpvar to represent INTO <variable> with a defined
level of subquery nesting.
*/
select_dumpvar(uint nest_level_arg)
{
var_list.empty();
row_count= 0;
nest_level= nest_level_arg;
}
~select_dumpvar() {}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
bool send_eof();
virtual bool check_simple_select() const;
void cleanup();
};
/* Functions in sql_class.cc */
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);
void mark_transaction_to_rollback(THD *thd, bool all);