/* Copyright (C) 2000-2006 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */


/*****************************************************************************
**
** This file implements classes defined in sql_class.h
** Especially the classes to handle a result from a select
**
*****************************************************************************/
|
|
|
|
|
2005-05-26 12:09:14 +02:00
|
|
|
#ifdef USE_PRAGMA_IMPLEMENTATION
|
2000-07-31 21:29:14 +02:00
|
|
|
#pragma implementation // gcc: Class implementation
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#include "mysql_priv.h"
|
|
|
|
#include <m_ctype.h>
|
|
|
|
#include <sys/stat.h>
|
2001-03-14 07:07:12 +01:00
|
|
|
#include <thr_alarm.h>
|
2000-07-31 21:29:14 +02:00
|
|
|
#ifdef __WIN__
|
|
|
|
#include <io.h>
|
|
|
|
#endif
|
2002-03-15 22:57:31 +01:00
|
|
|
#include <mysys_err.h>
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2003-12-21 01:07:45 +01:00
|
|
|
#include "sp_rcontext.h"
|
|
|
|
#include "sp_cache.h"
|
2003-07-01 18:14:24 +02:00
|
|
|
|
2003-04-02 12:06:33 +02:00
|
|
|
/*
  The following is used to initialise Table_ident with an internal
  table name.
*/
char internal_table_name[2]= "*";
char empty_c_string[1]= {0};    /* used for not defined db */

/* Default value for THD::where; reported in errors about the field list. */
const char * const THD::DEFAULT_WHERE= "field list";
|
|
|
|
|
2002-10-24 22:33:24 +02:00
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/*****************************************************************************
** Instantiate templates
*****************************************************************************/

#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
/* Used templates */
template class List<Key>;
template class List_iterator<Key>;
template class List<key_part_spec>;
template class List_iterator<key_part_spec>;
template class List<Alter_drop>;
template class List_iterator<Alter_drop>;
template class List<Alter_column>;
template class List_iterator<Alter_column>;
#endif
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
** User variables
|
|
|
|
****************************************************************************/
|
|
|
|
|
2002-11-07 11:49:02 +01:00
|
|
|
extern "C" byte *get_var_key(user_var_entry *entry, uint *length,
|
|
|
|
my_bool not_used __attribute__((unused)))
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
|
|
|
*length=(uint) entry->name.length;
|
|
|
|
return (byte*) entry->name.str;
|
|
|
|
}
|
|
|
|
|
2002-11-07 11:49:02 +01:00
|
|
|
/*
  HASH callback: destroy a user variable entry.

  entry->value may point directly past the entry header (value stored in
  the same allocation as the entry); free it separately only when it is
  a distinct allocation.
*/
extern "C" void free_user_var(user_var_entry *entry)
{
  char *inline_storage= (char*) entry + ALIGN_SIZE(sizeof(*entry));
  if (entry->value != NULL && entry->value != inline_storage)
    my_free(entry->value, MYF(0));
  my_free((char*) entry, MYF(0));
}
|
|
|
|
|
2004-04-21 12:15:43 +02:00
|
|
|
bool key_part_spec::operator==(const key_part_spec& other) const
|
|
|
|
{
|
|
|
|
return length == other.length && !strcmp(field_name, other.field_name);
|
|
|
|
}
|
|
|
|
|
2004-05-11 23:29:52 +02:00
|
|
|
|
|
|
|
/*
|
2004-05-15 10:57:40 +02:00
|
|
|
Test if a foreign key (= generated key) is a prefix of the given key
|
2004-05-11 23:29:52 +02:00
|
|
|
(ignoring key name, key type and order of columns)
|
|
|
|
|
|
|
|
NOTES:
|
|
|
|
This is only used to test if an index for a FOREIGN KEY exists
|
|
|
|
|
|
|
|
IMPLEMENTATION
|
|
|
|
We only compare field names
|
|
|
|
|
|
|
|
RETURN
|
|
|
|
0 Generated key is a prefix of other key
|
|
|
|
1 Not equal
|
|
|
|
*/
|
|
|
|
|
|
|
|
bool foreign_key_prefix(Key *a, Key *b)
|
2004-04-21 12:15:43 +02:00
|
|
|
{
|
2004-05-11 23:29:52 +02:00
|
|
|
/* Ensure that 'a' is the generated key */
|
|
|
|
if (a->generated)
|
|
|
|
{
|
|
|
|
if (b->generated && a->columns.elements > b->columns.elements)
|
2004-05-25 00:03:49 +02:00
|
|
|
swap_variables(Key*, a, b); // Put shorter key in 'a'
|
2004-05-11 23:29:52 +02:00
|
|
|
}
|
|
|
|
else
|
2004-04-21 12:15:43 +02:00
|
|
|
{
|
2004-05-11 23:29:52 +02:00
|
|
|
if (!b->generated)
|
|
|
|
return TRUE; // No foreign key
|
2004-05-25 00:03:49 +02:00
|
|
|
swap_variables(Key*, a, b); // Put generated key in 'a'
|
2004-05-11 23:29:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Test if 'a' is a prefix of 'b' */
|
|
|
|
if (a->columns.elements > b->columns.elements)
|
|
|
|
return TRUE; // Can't be prefix
|
|
|
|
|
|
|
|
List_iterator<key_part_spec> col_it1(a->columns);
|
|
|
|
List_iterator<key_part_spec> col_it2(b->columns);
|
|
|
|
const key_part_spec *col1, *col2;
|
|
|
|
|
|
|
|
#ifdef ENABLE_WHEN_INNODB_CAN_HANDLE_SWAPED_FOREIGN_KEY_COLUMNS
|
|
|
|
while ((col1= col_it1++))
|
|
|
|
{
|
|
|
|
bool found= 0;
|
|
|
|
col_it2.rewind();
|
|
|
|
while ((col2= col_it2++))
|
2004-04-21 12:15:43 +02:00
|
|
|
{
|
2004-05-11 23:29:52 +02:00
|
|
|
if (*col1 == *col2)
|
|
|
|
{
|
|
|
|
found= TRUE;
|
|
|
|
break;
|
|
|
|
}
|
2004-04-21 12:15:43 +02:00
|
|
|
}
|
2004-05-11 23:29:52 +02:00
|
|
|
if (!found)
|
|
|
|
return TRUE; // Error
|
|
|
|
}
|
|
|
|
return FALSE; // Is prefix
|
|
|
|
#else
|
|
|
|
while ((col1= col_it1++))
|
|
|
|
{
|
|
|
|
col2= col_it2++;
|
|
|
|
if (!(*col1 == *col2))
|
|
|
|
return TRUE;
|
2004-04-21 12:15:43 +02:00
|
|
|
}
|
2004-05-11 23:29:52 +02:00
|
|
|
return FALSE; // Is prefix
|
|
|
|
#endif
|
2004-04-21 12:15:43 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/****************************************************************************
** Thread specific functions
****************************************************************************/

/*
  Construct the open-tables state with the given table cache version
  and reset all open-table bookkeeping to its initial state.

  @param version_arg  table cache version to remember (callers pass
                      refresh_version)
*/
Open_tables_state::Open_tables_state(ulong version_arg)
  :version(version_arg)
{
  reset_open_tables_state();
}
|
|
|
|
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2004-08-24 17:00:45 +02:00
|
|
|
/*
  THD default constructor: build a per-connection thread descriptor.

  Initializes the base Statement with this THD's own main_lex and
  main_mem_root, copies the current refresh_version into the
  Open_tables_state base, then zeroes/defaults every member that must
  be in a known state before init() and init_for_queries() run.
  Initialization order below follows member/usage dependencies
  (e.g. init() is called before the structures that depend on
  session variables are set up) — do not reorder casually.
*/
THD::THD()
  :Statement(&main_lex, &main_mem_root, CONVENTIONAL_EXECUTION,
             /* statement id */ 0),
   Open_tables_state(refresh_version),
   lock_id(&main_lock_id),
   user_time(0), in_sub_stmt(0), global_read_lock(0), is_fatal_error(0),
   rand_used(0), time_zone_used(0),
   last_insert_id_used(0), last_insert_id_used_bin_log(0), insert_id_used(0),
   clear_next_insert_id(0), in_lock_tables(0), bootstrap(0),
   derived_tables_processing(FALSE), spcont(NULL), m_lip(NULL)
{
  ulong tmp;

  /*
    Pass nominal parameters to init_alloc_root only to ensure that
    the destructor works OK in case of an error. The main_mem_root
    will be re-initialized in init_for_queries().
  */
  init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
  stmt_arena= this;
  thread_stack= 0;
  db= 0;
  catalog= (char*)"std"; // the only catalog we have for now
  main_security_ctx.init();
  security_ctx= &main_security_ctx;
  locked=some_tables_deleted=no_errors=password= 0;
  query_start_used= 0;
  count_cuted_fields= CHECK_FIELD_IGNORE;
  killed= NOT_KILLED;
  db_length= col_access=0;
  query_error= tmp_table_used= 0;
  next_insert_id=last_insert_id=0;
  hash_clear(&handler_tables_hash);
  tmp_table=0;
  used_tables=0;
  cuted_fields= sent_row_count= 0L;
  limit_found_rows= 0;
  statement_id_counter= 0UL;
  // Must be reset to handle error with THD's created for init of mysqld
  lex->current_select= 0;
  start_time=(time_t) 0;
  time_after_lock=(time_t) 0;
  current_linfo = 0;
  slave_thread = 0;
  variables.pseudo_thread_id= 0;
  one_shot_set= 0;
  file_id = 0;
  query_id= 0;
  warn_id= 0;
  db_charset= global_system_variables.collation_database;
  bzero(ha_data, sizeof(ha_data));
  mysys_var=0;
  binlog_evt_union.do_union= FALSE;
#ifndef DBUG_OFF
  dbug_sentry=THD_SENTRY_MAGIC;
#endif
#ifndef EMBEDDED_LIBRARY
  net.vio=0;
#endif
  client_capabilities= 0;                       // minimalistic client
  net.last_error[0]=0;                          // If error on boot
#ifdef HAVE_QUERY_CACHE
  query_cache_init_query(&net);                 // If error on boot
#endif
  ull=0;
  system_thread= cleanup_done= abort_on_warning= no_warnings_for_error= 0;
  peer_port= 0;					// For SHOW PROCESSLIST
#ifdef __WIN__
  real_id = 0;
#endif
#ifdef SIGNAL_WITH_VIO_CLOSE
  active_vio = 0;
#endif
  pthread_mutex_init(&LOCK_delete, MY_MUTEX_INIT_FAST);

  /* Variables with default values */
  proc_info="login";
  where= THD::DEFAULT_WHERE;
  server_id = ::server_id;
  slave_net = 0;
  command=COM_CONNECT;
  *scramble= '\0';

  /* Copy session variables from the globals; must precede the code below. */
  init();
  /* Initialize sub structures */
  init_sql_alloc(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE);
  user_connect=(USER_CONN *)0;
  hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
	    (hash_get_key) get_var_key,
	    (hash_free_key) free_user_var, 0);

  sp_proc_cache= NULL;
  sp_func_cache= NULL;

  /* For user vars replication*/
  if (opt_bin_log)
    my_init_dynamic_array(&user_var_events,
			  sizeof(BINLOG_USER_VAR_EVENT *), 16, 16);
  else
    bzero((char*) &user_var_events, sizeof(user_var_events));

  /* Protocol */
  protocol= &protocol_simple;			// Default protocol
  protocol_simple.init(this);
  protocol_prep.init(this);

  tablespace_op=FALSE;
  /*
    Seed this session's random generator; mixes the seed with this THD's
    own address and the global query id so different THDs get different
    sequences.
  */
  tmp= sql_rnd_with_mutex();
  randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id);
  substitute_null_with_insert_id = FALSE;
  thr_lock_info_init(&lock_info); /* safety: will be reset after start */
  thr_lock_owner_init(&main_lock_id, &lock_info);

  /* No internal error handler installed by default. */
  m_internal_handler= NULL;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Install an internal error handler for this thread.

  While installed, the handler gets first chance at errors routed
  through THD::handle_error(). Exactly one handler may be active at a
  time; installing a second one without popping the first asserts in
  debug builds.

  @param handler  handler to install (must not already have one active)
*/
void THD::push_internal_handler(Internal_error_handler *handler)
{
  /*
    TODO: The current implementation is limited to 1 handler at a time only.
    THD and sp_rcontext need to be modified to use a common handler stack.
  */
  DBUG_ASSERT(m_internal_handler == NULL);
  m_internal_handler= handler;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Offer an error to the installed internal error handler, if any.

  @param sql_errno  error number being raised
  @param level      warning level of the condition

  @return the handler's verdict when a handler is installed;
          FALSE when no handler is installed (error is not intercepted)
*/
bool THD::handle_error(uint sql_errno,
                       MYSQL_ERROR::enum_warning_level level)
{
  if (m_internal_handler == NULL)
    return FALSE;                               // 'FALSE', as per coding style

  return m_internal_handler->handle_error(sql_errno, level, this);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Remove the currently installed internal error handler.

  A handler must be installed when this is called (asserted in debug
  builds); afterwards errors are no longer intercepted.
*/
void THD::pop_internal_handler()
{
  DBUG_ASSERT(m_internal_handler != NULL);
  m_internal_handler= NULL;
}
|
|
|
|
|
2002-11-16 19:19:10 +01:00
|
|
|
|
|
|
|
/*
  Init common variables that have to be reset on connection start and
  on change_user.
*/

void THD::init(void)
{
  /*
    Take a private copy of the global system variables under the global
    mutex; the date/time formats are deep-copied so the session owns its
    own format objects.
  */
  pthread_mutex_lock(&LOCK_global_system_variables);
  variables= global_system_variables;
  variables.time_format= date_time_format_copy((THD*) 0,
					       variables.time_format);
  variables.date_format= date_time_format_copy((THD*) 0,
					       variables.date_format);
  variables.datetime_format= date_time_format_copy((THD*) 0,
						   variables.datetime_format);
  pthread_mutex_unlock(&LOCK_global_system_variables);
  server_status= SERVER_STATUS_AUTOCOMMIT;
  /* Keep the client-visible status flag in sync with the sql_mode bit. */
  if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
    server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES;
  options= thd_startup_options;
  no_trans_update.stmt= no_trans_update.all= FALSE;
  open_options=ha_open_options;
  update_lock_default= (variables.low_priority_updates ?
			TL_WRITE_LOW_PRIORITY :
			TL_WRITE);
  session_tx_isolation= (enum_tx_isolation) variables.tx_isolation;
  /* Reset the per-session warning state and counters. */
  warn_list.empty();
  bzero((char*) warn_count, sizeof(warn_count));
  total_warn_count= 0;
  update_charset();
  bzero((char *) &status_var, sizeof(status_var));
}
|
|
|
|
|
2003-03-19 20:23:13 +01:00
|
|
|
|
2003-12-21 20:26:45 +01:00
|
|
|
/*
|
|
|
|
Init THD for query processing.
|
|
|
|
This has to be called once before we call mysql_parse.
|
|
|
|
See also comments in sql_class.h.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void THD::init_for_queries()
|
|
|
|
{
|
2007-02-19 14:57:54 +01:00
|
|
|
set_time();
|
2004-09-03 17:11:09 +02:00
|
|
|
ha_enable_transaction(this,TRUE);
|
2004-09-23 11:48:17 +02:00
|
|
|
|
2004-11-08 00:13:54 +01:00
|
|
|
reset_root_defaults(mem_root, variables.query_alloc_block_size,
|
2004-09-23 11:48:17 +02:00
|
|
|
variables.query_prealloc_size);
|
2005-01-16 13:16:23 +01:00
|
|
|
#ifdef USING_TRANSACTIONS
|
2004-09-23 11:48:17 +02:00
|
|
|
reset_root_defaults(&transaction.mem_root,
|
|
|
|
variables.trans_alloc_block_size,
|
|
|
|
variables.trans_prealloc_size);
|
2005-01-16 13:16:23 +01:00
|
|
|
#endif
|
2005-08-12 21:15:01 +02:00
|
|
|
transaction.xid_state.xid.null();
|
|
|
|
transaction.xid_state.in_thd=1;
|
2003-12-21 20:26:45 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-11-16 19:19:10 +01:00
|
|
|
/*
|
|
|
|
Do what's needed when one invokes change user
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
change_user()
|
|
|
|
|
|
|
|
IMPLEMENTATION
|
|
|
|
Reset all resources that are connection specific
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
void THD::change_user(void)
|
|
|
|
{
|
|
|
|
cleanup();
|
2002-11-21 21:25:53 +01:00
|
|
|
cleanup_done= 0;
|
2002-11-16 19:19:10 +01:00
|
|
|
init();
|
2004-09-22 13:50:07 +02:00
|
|
|
stmt_map.reset();
|
2003-05-12 15:36:31 +02:00
|
|
|
hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
|
2002-11-16 19:19:10 +01:00
|
|
|
(hash_get_key) get_var_key,
|
2002-11-21 21:25:53 +01:00
|
|
|
(hash_free_key) free_user_var, 0);
|
2003-10-21 12:08:35 +02:00
|
|
|
sp_cache_clear(&sp_proc_cache);
|
|
|
|
sp_cache_clear(&sp_func_cache);
|
2002-11-16 19:19:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2001-08-21 19:06:00 +02:00
|
|
|
/* Do operations that may take a long time */
|
|
|
|
|
|
|
|
void THD::cleanup(void)
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2001-08-21 19:06:00 +02:00
|
|
|
DBUG_ENTER("THD::cleanup");
|
2005-04-06 18:43:35 +02:00
|
|
|
#ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE
|
2005-08-12 21:15:01 +02:00
|
|
|
if (transaction.xid_state.xa_state == XA_PREPARED)
|
|
|
|
{
|
|
|
|
#error xid_state in the cache should be replaced by the allocated value
|
|
|
|
}
|
2005-04-06 18:43:35 +02:00
|
|
|
#endif
|
2005-08-12 21:15:01 +02:00
|
|
|
{
|
2005-04-04 00:50:05 +02:00
|
|
|
ha_rollback(this);
|
2005-08-12 21:15:01 +02:00
|
|
|
xid_cache_delete(&transaction.xid_state);
|
|
|
|
}
|
2000-07-31 21:29:14 +02:00
|
|
|
if (locked_tables)
|
|
|
|
{
|
|
|
|
lock=locked_tables; locked_tables=0;
|
|
|
|
close_thread_tables(this);
|
|
|
|
}
|
2004-09-24 18:39:25 +02:00
|
|
|
mysql_ha_flush(this, (TABLE_LIST*) 0,
|
2005-11-15 21:57:02 +01:00
|
|
|
MYSQL_HA_CLOSE_FINAL | MYSQL_HA_FLUSH_ALL, FALSE);
|
2004-09-24 18:39:25 +02:00
|
|
|
hash_free(&handler_tables_hash);
|
2005-08-30 15:22:19 +02:00
|
|
|
delete_dynamic(&user_var_events);
|
|
|
|
hash_free(&user_vars);
|
2000-07-31 21:29:14 +02:00
|
|
|
close_temporary_tables(this);
|
2003-11-03 13:01:59 +01:00
|
|
|
my_free((char*) variables.time_format, MYF(MY_ALLOW_ZERO_PTR));
|
|
|
|
my_free((char*) variables.date_format, MYF(MY_ALLOW_ZERO_PTR));
|
|
|
|
my_free((char*) variables.datetime_format, MYF(MY_ALLOW_ZERO_PTR));
|
2005-09-09 09:43:26 +02:00
|
|
|
|
2003-10-21 12:08:35 +02:00
|
|
|
sp_cache_clear(&sp_proc_cache);
|
2005-09-09 09:43:26 +02:00
|
|
|
sp_cache_clear(&sp_func_cache);
|
|
|
|
|
2002-11-16 19:19:10 +01:00
|
|
|
if (global_read_lock)
|
|
|
|
unlock_global_read_lock(this);
|
|
|
|
if (ull)
|
2001-03-06 14:24:08 +01:00
|
|
|
{
|
2002-11-16 19:19:10 +01:00
|
|
|
pthread_mutex_lock(&LOCK_user_locks);
|
|
|
|
item_user_lock_release(ull);
|
|
|
|
pthread_mutex_unlock(&LOCK_user_locks);
|
|
|
|
ull= 0;
|
2001-03-06 14:24:08 +01:00
|
|
|
}
|
2003-04-02 20:42:28 +02:00
|
|
|
|
2001-08-21 19:06:00 +02:00
|
|
|
cleanup_done=1;
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
2002-11-16 19:19:10 +01:00
|
|
|
|
2001-08-21 19:06:00 +02:00
|
|
|
/** Destroy the session object and release everything it still owns. */
THD::~THD()
{
  THD_CHECK_SENTRY(this);
  DBUG_ENTER("~THD()");
  /* Ensure that no one is using THD */
  pthread_mutex_lock(&LOCK_delete);
  pthread_mutex_unlock(&LOCK_delete);
  add_to_status(&global_status_var, &status_var);

  /* Close connection */
#ifndef EMBEDDED_LIBRARY
  if (net.vio)
  {
    vio_delete(net.vio);
    net_end(&net);
  }
#endif
  stmt_map.reset();                     /* close all prepared statements */
  DBUG_ASSERT(lock_info.n_cursors == 0);
  if (!cleanup_done)
    cleanup();

  ha_close_connection(this);

  DBUG_PRINT("info", ("freeing security context"));
  main_security_ctx.destroy();
  safeFree(db);
  free_root(&warn_root, MYF(0));
#ifdef USING_TRANSACTIONS
  free_root(&transaction.mem_root, MYF(0));
#endif
  mysys_var= 0;                         // Safety (shouldn't be needed)
  pthread_mutex_destroy(&LOCK_delete);
#ifndef DBUG_OFF
  dbug_sentry= THD_SENTRY_GONE;
#endif
  free_root(&main_mem_root, MYF(0));
  DBUG_VOID_RETURN;
}
|
|
|
|
|
2002-08-22 15:50:58 +02:00
|
|
|
|
2004-09-13 15:48:01 +02:00
|
|
|
/*
|
2006-01-10 17:56:23 +01:00
|
|
|
Add all status variables to another status variable array
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
add_to_status()
|
|
|
|
to_var add to this array
|
|
|
|
from_var from this array
|
2004-09-13 15:48:01 +02:00
|
|
|
|
|
|
|
NOTES
|
|
|
|
This function assumes that all variables are long/ulong.
|
|
|
|
If this assumption will change, then we have to explictely add
|
|
|
|
the other variables after the while loop
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
  Accumulate one STATUS_VAR array into another.
  Assumes every member up to and including last_system_status_var is a
  long/ulong, so the two structs can be walked as flat ulong arrays.
*/
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
{
  ulong *to= (ulong*) to_var;
  ulong *from= (ulong*) from_var;
  /* One past the last counter that participates in the summation. */
  ulong *end= (ulong*) ((byte*) to_var +
                        offsetof(STATUS_VAR, last_system_status_var) +
                        sizeof(ulong));

  for (; to != end; to++, from++)
    *to+= *from;
}
|
|
|
|
|
|
|
|
|
2003-03-31 10:39:46 +02:00
|
|
|
/** Wake/kill the session: mark it killed and nudge it out of any wait. */
void THD::awake(THD::killed_state state_to_set)
{
  THD_CHECK_SENTRY(this);
  safe_mutex_assert_owner(&LOCK_delete);

  killed= state_to_set;
  if (state_to_set != THD::KILL_QUERY)
  {
    thr_alarm_kill(real_id);
#ifdef SIGNAL_WITH_VIO_CLOSE
    close_active_vio();
#endif
  }
  if (mysys_var)
  {
    pthread_mutex_lock(&mysys_var->mutex);
    if (!system_thread)                 // Don't abort locks
      mysys_var->abort= 1;
    /*
      This broadcast could be up in the air if the victim thread
      exits the cond in the time between read and broadcast, but that is
      ok since all we want to do is to make the victim thread get out
      of waiting on current_cond.
      If we see a non-zero current_cond: it cannot be an old value (because
      then exit_cond() should have run and it can't because we have mutex); so
      it is the true value but maybe current_mutex is not yet non-zero (we're
      in the middle of enter_cond() and there is a "memory order
      inversion"). So we test the mutex too to not lock 0.

      Note that there is a small chance we fail to kill. If victim has locked
      current_mutex, but hasn't yet entered enter_cond() (which means that
      current_cond and current_mutex are 0), then the victim will not get
      a signal and it may wait "forever" on the cond (until
      we issue a second KILL or the status it's waiting for happens).
      It's true that we have set its thd->killed but it may not
      see it immediately and so may have time to reach the cond_wait().
    */
    if (mysys_var->current_cond && mysys_var->current_mutex)
    {
      pthread_mutex_lock(mysys_var->current_mutex);
      pthread_cond_broadcast(mysys_var->current_cond);
      pthread_mutex_unlock(mysys_var->current_mutex);
    }
    pthread_mutex_unlock(&mysys_var->mutex);
  }
}
|
|
|
|
|
2002-06-12 14:04:18 +02:00
|
|
|
/*
|
|
|
|
Remember the location of thread info, the structure needed for
|
|
|
|
sql_alloc() and the structure for the net buffer
|
|
|
|
*/
|
2000-07-31 21:29:14 +02:00
|
|
|
|
|
|
|
bool THD::store_globals()
|
|
|
|
{
|
2005-11-23 19:18:10 +01:00
|
|
|
/*
|
|
|
|
Assert that thread_stack is initialized: it's necessary to be able
|
|
|
|
to track stack overrun.
|
|
|
|
*/
|
|
|
|
DBUG_ASSERT(this->thread_stack);
|
|
|
|
|
2002-09-05 15:17:08 +02:00
|
|
|
if (my_pthread_setspecific_ptr(THR_THD, this) ||
|
2002-10-02 16:55:12 +02:00
|
|
|
my_pthread_setspecific_ptr(THR_MALLOC, &mem_root))
|
2002-09-05 15:17:08 +02:00
|
|
|
return 1;
|
|
|
|
mysys_var=my_thread_var;
|
|
|
|
dbug_thread_id=my_thread_id();
|
2003-10-31 23:20:23 +01:00
|
|
|
/*
|
|
|
|
By default 'slave_proxy_id' is 'thread_id'. They may later become different
|
|
|
|
if this is the slave SQL thread.
|
|
|
|
*/
|
2003-11-04 13:09:03 +01:00
|
|
|
variables.pseudo_thread_id= thread_id;
|
2005-07-31 11:49:55 +02:00
|
|
|
/*
|
|
|
|
We have to call thr_lock_info_init() again here as THD may have been
|
|
|
|
created in another thread
|
|
|
|
*/
|
2005-07-19 20:21:12 +02:00
|
|
|
thr_lock_info_init(&lock_info);
|
2002-09-05 15:17:08 +02:00
|
|
|
return 0;
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
2002-06-12 14:04:18 +02:00
|
|
|
|
2006-10-02 12:28:23 +02:00
|
|
|
/*
|
|
|
|
Cleanup after query.
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
THD::cleanup_after_query()
|
2004-09-15 21:10:31 +02:00
|
|
|
|
2006-10-02 12:28:23 +02:00
|
|
|
DESCRIPTION
|
2006-10-03 11:38:16 +02:00
|
|
|
This function is used to reset thread data to its default state.
|
2006-10-02 12:28:23 +02:00
|
|
|
|
|
|
|
NOTE
|
|
|
|
This function is not suitable for setting thread data to some
|
|
|
|
non-default values, as there is only one replication thread, so
|
|
|
|
different master threads may overwrite data of each other on
|
|
|
|
slave.
|
|
|
|
*/
|
2006-10-03 11:38:16 +02:00
|
|
|
|
2004-09-15 21:10:31 +02:00
|
|
|
void THD::cleanup_after_query()
|
|
|
|
{
|
2006-10-02 12:28:23 +02:00
|
|
|
last_insert_id_used= FALSE;
|
2004-09-15 21:10:31 +02:00
|
|
|
if (clear_next_insert_id)
|
|
|
|
{
|
|
|
|
clear_next_insert_id= 0;
|
|
|
|
next_insert_id= 0;
|
|
|
|
}
|
2007-03-09 18:18:28 +01:00
|
|
|
/*
|
|
|
|
Reset rand_used so that detection of calls to rand() will save random
|
|
|
|
seeds if needed by the slave.
|
|
|
|
|
|
|
|
Do not reset rand_used if inside a stored function or trigger because
|
|
|
|
only the call to these operations is logged. Thus only the calling
|
|
|
|
statement needs to detect rand() calls made by its substatements. These
|
|
|
|
substatements must not set rand_used to 0 because it would remove the
|
|
|
|
detection of rand() by the calling statement.
|
|
|
|
*/
|
|
|
|
if (!in_sub_stmt)
|
|
|
|
rand_used= 0;
|
2004-09-15 21:10:31 +02:00
|
|
|
/* Free Items that were created during this execution */
|
2005-06-23 18:22:08 +02:00
|
|
|
free_items();
|
2005-10-25 11:02:48 +02:00
|
|
|
/* Reset where. */
|
|
|
|
where= THD::DEFAULT_WHERE;
|
2004-09-15 21:10:31 +02:00
|
|
|
}
|
|
|
|
|
2006-10-03 11:38:16 +02:00
|
|
|
|
2003-08-18 23:08:08 +02:00
|
|
|
/*
|
|
|
|
Convert a string to another character set
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
convert_string()
|
|
|
|
to Store new allocated string here
|
|
|
|
to_cs New character set for allocated string
|
|
|
|
from String to convert
|
|
|
|
from_length Length of string to convert
|
|
|
|
from_cs Original character set
|
|
|
|
|
|
|
|
NOTES
|
|
|
|
to will be 0-terminated to make it easy to pass to system funcs
|
|
|
|
|
|
|
|
RETURN
|
|
|
|
0 ok
|
|
|
|
1 End of memory.
|
|
|
|
In this case to->str will point to 0 and to->length will be 0.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
  Convert a string to another character set.

  SYNOPSIS
    convert_string()
    to            Store new allocated string here
    to_cs         New character set for allocated string
    from          String to convert
    from_length   Length of string to convert
    from_cs       Original character set

  NOTES
    to will be 0-terminated to make it easy to pass to system funcs

  RETURN
    0   ok
    1   End of memory.
        In this case to->str will point to 0 and to->length will be 0.
*/
bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
                         const char *from, uint from_length,
                         CHARSET_INFO *from_cs)
{
  DBUG_ENTER("convert_string");
  /* Worst case: every source byte expands to mbmaxlen bytes. */
  size_s new_length= to_cs->mbmaxlen * from_length;
  uint dummy_errors;

  if (!(to->str= alloc(new_length + 1)))
  {
    to->length= 0;                      // Safety fix
    DBUG_RETURN(1);                     // EOM
  }
  to->length= copy_and_convert((char*) to->str, new_length, to_cs,
                               from, from_length, from_cs, &dummy_errors);
  to->str[to->length]= 0;               // Safety
  DBUG_RETURN(0);
}
|
|
|
|
|
|
|
|
|
2004-05-25 00:03:49 +02:00
|
|
|
/*
|
|
|
|
Convert string from source character set to target character set inplace.
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
THD::convert_string
|
|
|
|
|
|
|
|
DESCRIPTION
|
|
|
|
Convert string using convert_buffer - buffer for character set
|
|
|
|
conversion shared between all protocols.
|
|
|
|
|
|
|
|
RETURN
|
|
|
|
0 ok
|
|
|
|
!0 out of memory
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
  Convert string from source character set to target character set inplace.

  SYNOPSIS
    THD::convert_string

  DESCRIPTION
    Convert string using convert_buffer - buffer for character set
    conversion shared between all protocols.

  RETURN
    0   ok
   !0   out of memory
*/
bool THD::convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs)
{
  uint dummy_errors;
  if (convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs,
                          &dummy_errors))
    return TRUE;
  /* If convert_buffer >> s copying is more efficient long term */
  if (convert_buffer.alloced_length() >= convert_buffer.length() * 2 ||
      !s->is_alloced())
    return s->copy(convert_buffer);
  s->swap(convert_buffer);
  return FALSE;
}
|
|
|
|
|
2004-07-06 14:15:43 +02:00
|
|
|
|
2003-08-18 23:08:08 +02:00
|
|
|
/*
|
|
|
|
Update some cache variables when character set changes
|
|
|
|
*/
|
|
|
|
|
|
|
|
void THD::update_charset()
|
|
|
|
{
|
2004-07-06 14:15:43 +02:00
|
|
|
uint32 not_used;
|
|
|
|
charset_is_system_charset= !String::needs_conversion(0,charset(),
|
|
|
|
system_charset_info,
|
|
|
|
¬_used);
|
|
|
|
charset_is_collation_connection=
|
|
|
|
!String::needs_conversion(0,charset(),variables.collation_connection,
|
|
|
|
¬_used);
|
2006-02-14 05:24:01 +01:00
|
|
|
charset_is_character_set_filesystem=
|
|
|
|
!String::needs_conversion(0, charset(),
|
|
|
|
variables.character_set_filesystem, ¬_used);
|
2003-08-18 23:08:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-03-15 22:57:31 +01:00
|
|
|
/* routings to adding tables to list of changed in transaction tables */
|
|
|
|
|
|
|
|
/* routings to adding tables to list of changed in transaction tables */

/*
  Splice new_table into the list in front of curr.  If new_table is NULL
  (allocation failed upstream) the list is left untouched.
*/
inline static void list_include(CHANGED_TABLE_LIST** prev,
                                CHANGED_TABLE_LIST* curr,
                                CHANGED_TABLE_LIST* new_table)
{
  if (!new_table)
    return;
  *prev= new_table;
  (*prev)->next= curr;
}
|
|
|
|
|
|
|
|
/* add table to list of changed in transaction tables */
|
2002-06-11 10:20:31 +02:00
|
|
|
|
2002-03-15 22:57:31 +01:00
|
|
|
/* add table to list of changed in transaction tables */

void THD::add_changed_table(TABLE *table)
{
  DBUG_ENTER("THD::add_changed_table(table)");

  /* Only meaningful inside a multi-statement transaction on a
     transactional table. */
  DBUG_ASSERT((options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
              table->file->has_transactions());
  add_changed_table(table->s->table_cache_key, table->s->key_length);
  DBUG_VOID_RETURN;
}
|
2002-03-15 22:57:31 +01:00
|
|
|
|
2002-11-07 02:54:00 +01:00
|
|
|
|
2002-09-19 09:36:19 +02:00
|
|
|
void THD::add_changed_table(const char *key, long key_length)
|
|
|
|
{
|
|
|
|
DBUG_ENTER("THD::add_changed_table(key)");
|
2002-11-07 02:54:00 +01:00
|
|
|
CHANGED_TABLE_LIST **prev_changed = &transaction.changed_tables;
|
|
|
|
CHANGED_TABLE_LIST *curr = transaction.changed_tables;
|
2002-03-15 22:57:31 +01:00
|
|
|
|
2002-11-07 02:54:00 +01:00
|
|
|
for (; curr; prev_changed = &(curr->next), curr = curr->next)
|
2002-03-15 22:57:31 +01:00
|
|
|
{
|
2002-09-19 09:36:19 +02:00
|
|
|
int cmp = (long)curr->key_length - (long)key_length;
|
2002-03-15 22:57:31 +01:00
|
|
|
if (cmp < 0)
|
|
|
|
{
|
2002-11-07 02:54:00 +01:00
|
|
|
list_include(prev_changed, curr, changed_table_dup(key, key_length));
|
2002-03-15 22:57:31 +01:00
|
|
|
DBUG_PRINT("info",
|
2007-03-22 19:32:07 +01:00
|
|
|
("key_length: %ld %u", key_length,
|
|
|
|
(*prev_changed)->key_length));
|
2002-03-15 22:57:31 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
else if (cmp == 0)
|
|
|
|
{
|
2002-09-19 09:36:19 +02:00
|
|
|
cmp = memcmp(curr->key, key, curr->key_length);
|
2002-03-15 22:57:31 +01:00
|
|
|
if (cmp < 0)
|
|
|
|
{
|
2002-11-07 02:54:00 +01:00
|
|
|
list_include(prev_changed, curr, changed_table_dup(key, key_length));
|
2002-03-15 22:57:31 +01:00
|
|
|
DBUG_PRINT("info",
|
2007-03-28 19:46:42 +02:00
|
|
|
("key_length: %ld %u", key_length,
|
2002-11-07 02:54:00 +01:00
|
|
|
(*prev_changed)->key_length));
|
2002-03-15 22:57:31 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
else if (cmp == 0)
|
|
|
|
{
|
|
|
|
DBUG_PRINT("info", ("already in list"));
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2002-11-07 02:54:00 +01:00
|
|
|
*prev_changed = changed_table_dup(key, key_length);
|
2007-03-22 19:32:07 +01:00
|
|
|
DBUG_PRINT("info", ("key_length: %ld %u", key_length,
|
2002-11-07 02:54:00 +01:00
|
|
|
(*prev_changed)->key_length));
|
2002-03-15 22:57:31 +01:00
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
2002-06-11 10:20:31 +02:00
|
|
|
|
2002-09-19 09:36:19 +02:00
|
|
|
/*
  Allocate a CHANGED_TABLE_LIST node (with its key stored inline right
  after the struct) on the transaction memroot.

  RETURN
    node pointer on success; 0 on out-of-memory (the connection is then
    marked killed).

  FIX: the OOM error message previously reported
  ALIGN_SIZE(sizeof(TABLE_LIST)) although the allocation is sized with
  ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST)); the message now matches the
  actual requested size.
*/
CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length)
{
  CHANGED_TABLE_LIST* new_table=
    (CHANGED_TABLE_LIST*) trans_alloc(ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST))+
                                      key_length + 1);
  if (!new_table)
  {
    my_error(EE_OUTOFMEMORY, MYF(ME_BELL),
             ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST)) + key_length + 1);
    killed= KILL_CONNECTION;
    return 0;
  }

  /* The key bytes live immediately after the (aligned) struct. */
  new_table->key= (char *) (((byte*)new_table)+
                            ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST)));
  new_table->next= 0;
  new_table->key_length= key_length;
  ::memcpy(new_table->key, key, key_length);
  return new_table;
}
|
|
|
|
|
2004-09-16 11:47:39 +02:00
|
|
|
|
2002-09-26 22:08:22 +02:00
|
|
|
/*
  Send the metadata (column list) for EXPLAIN output.

  FIX: the "key_len" column was the only Item_empty_string constructed
  without the explicit system charset argument, unlike every sibling
  column; it now passes 'cs' for consistency.
*/
int THD::send_explain_fields(select_result *result)
{
  List<Item> field_list;
  Item *item;
  CHARSET_INFO *cs= system_charset_info;
  field_list.push_back(new Item_return_int("id",3, MYSQL_TYPE_LONGLONG));
  field_list.push_back(new Item_empty_string("select_type", 19, cs));
  field_list.push_back(item= new Item_empty_string("table", NAME_LEN, cs));
  item->maybe_null= 1;
  field_list.push_back(item= new Item_empty_string("type", 10, cs));
  item->maybe_null= 1;
  field_list.push_back(item=new Item_empty_string("possible_keys",
                                                  NAME_LEN*MAX_KEY, cs));
  item->maybe_null=1;
  field_list.push_back(item=new Item_empty_string("key", NAME_LEN, cs));
  item->maybe_null=1;
  field_list.push_back(item=new Item_empty_string("key_len",
                                                  NAME_LEN*MAX_KEY, cs));
  item->maybe_null=1;
  field_list.push_back(item=new Item_empty_string("ref",
                                                  NAME_LEN*MAX_REF_PARTS, cs));
  item->maybe_null=1;
  field_list.push_back(item= new Item_return_int("rows", 10,
                                                 MYSQL_TYPE_LONGLONG));
  item->maybe_null= 1;
  field_list.push_back(new Item_empty_string("Extra", 255, cs));
  return (result->send_fields(field_list,
                              Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF));
}
|
2002-03-15 22:57:31 +01:00
|
|
|
|
2002-09-03 14:44:25 +02:00
|
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
/** Force-close the connection's active network endpoint (used by KILL). */
void THD::close_active_vio()
{
  DBUG_ENTER("close_active_vio");
  safe_mutex_assert_owner(&LOCK_delete);
#ifndef EMBEDDED_LIBRARY
  if (active_vio)
  {
    vio_close(active_vio);
    active_vio= 0;
  }
#endif
  DBUG_VOID_RETURN;
}
#endif
|
|
|
|
|
2002-11-14 08:43:24 +01:00
|
|
|
|
2004-10-08 00:21:19 +02:00
|
|
|
struct Item_change_record: public ilink
|
|
|
|
{
|
|
|
|
Item **place;
|
|
|
|
Item *old_value;
|
|
|
|
/* Placement new was hidden by `new' in ilink (TODO: check): */
|
2004-10-08 15:00:36 +02:00
|
|
|
static void *operator new(size_t size, void *mem) { return mem; }
|
2005-02-15 01:55:44 +01:00
|
|
|
static void operator delete(void *ptr, size_t size) {}
|
|
|
|
static void operator delete(void *ptr, void *mem) { /* never called */ }
|
2004-10-08 00:21:19 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
Register an item tree tree transformation, performed by the query
|
|
|
|
optimizer. We need a pointer to runtime_memroot because it may be !=
|
2005-09-02 15:21:19 +02:00
|
|
|
thd->mem_root (due to possible set_n_backup_active_arena called for thd).
|
2004-10-08 00:21:19 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
  Register an item tree tree transformation, performed by the query
  optimizer. We need a pointer to runtime_memroot because it may be !=
  thd->mem_root (due to possible set_n_backup_active_arena called for thd).
*/
void THD::nocheck_register_item_tree_change(Item **place, Item *old_value,
                                            MEM_ROOT *runtime_memroot)
{
  Item_change_record *change;
  /*
    Now we use one node per change, which adds some memory overhead,
    but still is rather fast as we use alloc_root for allocations.
    A list of item tree changes of an average query should be short.
  */
  void *change_mem= alloc_root(runtime_memroot, sizeof(*change));
  if (change_mem == 0)
  {
    /*
      OOM, thd->fatal_error() is called by the error handler of the
      memroot. Just return.
    */
    return;
  }
  change= new (change_mem) Item_change_record;
  change->place= place;
  change->old_value= old_value;
  change_list.append(change);
}
|
|
|
|
|
|
|
|
|
|
|
|
void THD::rollback_item_tree_changes()
|
|
|
|
{
|
|
|
|
I_List_iterator<Item_change_record> it(change_list);
|
|
|
|
Item_change_record *change;
|
2004-11-03 11:39:38 +01:00
|
|
|
DBUG_ENTER("rollback_item_tree_changes");
|
|
|
|
|
2004-10-08 00:21:19 +02:00
|
|
|
while ((change= it++))
|
|
|
|
*change->place= change->old_value;
|
|
|
|
/* We can forget about changes memory: it's allocated in runtime memroot */
|
|
|
|
change_list.empty();
|
2004-11-03 11:39:38 +01:00
|
|
|
DBUG_VOID_RETURN;
|
2004-10-08 00:21:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/*****************************************************************************
|
|
|
|
** Functions to provide a interface to select results
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
/*****************************************************************************
** Functions to provide a interface to select results
*****************************************************************************/

/* Bind the result sink to the session that creates it. */
select_result::select_result()
{
  thd= current_thd;
}
|
|
|
|
|
2003-10-08 17:53:31 +02:00
|
|
|
/*
  Default error reporting for a result sink: forward the error code and
  message to the diagnostics area / client via my_message().
*/
void select_result::send_error(uint errcode, const char *err)
{
  my_message(errcode, err, MYF(0));
}
|
|
|
|
|
2004-08-24 18:17:11 +02:00
|
|
|
|
|
|
|
/* The base class holds no per-execution state, so there is nothing to reset. */
void select_result::cleanup()
{
  /* do nothing */
}
|
|
|
|
|
2006-12-01 11:25:06 +01:00
|
|
|
bool select_result::check_simple_select() const
|
|
|
|
{
|
|
|
|
my_error(ER_SP_BAD_CURSOR_QUERY, MYF(0));
|
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2003-10-08 17:53:31 +02:00
|
|
|
/*
  Default separators for SELECT ... INTO OUTFILE / LOAD DATA, installed
  by sql_exchange's constructor below and overridable from SQL syntax.
*/
static String default_line_term("\n",default_charset_info);
static String default_escaped("\\",default_charset_info);
static String default_field_term("\t",default_charset_info);
|
2000-07-31 21:29:14 +02:00
|
|
|
|
|
|
|
/*
  Describe the external file format of SELECT ... INTO OUTFILE/DUMPFILE.
  'name' is the target file name; 'flag' selects DUMPFILE mode.
  All separators start at their defaults; the parser overrides them.
*/
sql_exchange::sql_exchange(char *name, bool flag)
  :file_name(name), opt_enclosed(0), dumpfile(flag), skip_lines(0)
{
  field_term= &default_field_term;
  escaped=    &default_escaped;
  line_term=  &default_line_term;
  enclosed= line_start= &my_empty_string;
  cs= NULL;                       // No explicit CHARACTER SET given yet
}
|
|
|
|
|
2004-08-03 12:32:21 +02:00
|
|
|
/*
  Send the result set metadata (column definitions) to the client.
  On success, mark the data set as started (status= 1) so that abort()
  knows it may have to terminate it cleanly.
  Returns the protocol layer's error flag.
*/
bool select_send::send_fields(List<Item> &list, uint flags)
{
  bool res= thd->protocol->send_fields(&list, flags);
  if (!res)
    status= 1;
  return res;
}
|
|
|
|
|
2005-09-13 15:32:42 +02:00
|
|
|
/*
  Abort the result set currently being sent.

  Only acts when a data set was started (status != 0) inside a stored
  procedure that has a handler for the pending error; otherwise the
  normal error path is left to run.
*/
void select_send::abort()
{
  DBUG_ENTER("select_send::abort");
  if (status && thd->spcont &&
      thd->spcont->find_handler(thd->net.last_errno,
                                MYSQL_ERROR::WARN_LEVEL_ERROR))
  {
    /*
      Executing stored procedure without a handler.
      Here we should actually send an error to the client,
      but as an error will break a multiple result set, the only thing we
      can do for now is to nicely end the current data set and remembering
      the error so that the calling routine will abort
    */
    thd->net.report_error= 0;   // Cleared temporarily so send_eof() goes through
    send_eof();
    thd->net.report_error= 1;   // Abort SP
  }
  DBUG_VOID_RETURN;
}
|
|
|
|
|
|
|
|
|
2000-07-31 21:29:14 +02:00
|
|
|
/* Send data to client. Returns 0 if ok */
|
|
|
|
|
|
|
|
/*
  Send one row of the result set to the client.

  Returns 0 on success (row written or legitimately skipped by OFFSET),
  1 if a pending error prevented the row from being written.
*/
bool select_send::send_data(List<Item> &items)
{
  if (unit->offset_limit_cnt)
  {						// using limit offset,count
    unit->offset_limit_cnt--;
    /* Plain return is safe here: DBUG_ENTER has not run yet. */
    return 0;
  }

  /*
    We may be passing the control from mysqld to the client: release the
    InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
    by thd
  */
  ha_release_temporary_latches(thd);

  List_iterator_fast<Item> li(items);
  Protocol *protocol= thd->protocol;
  char buff[MAX_FIELD_WIDTH];
  String buffer(buff, sizeof(buff), &my_charset_bin);
  DBUG_ENTER("select_send::send_data");

  protocol->prepare_for_resend();
  Item *item;
  while ((item=li++))
  {
    /* Each item packs its own value into the row being assembled. */
    if (item->send(protocol, &buffer))
    {
      protocol->free();				// Free used buffer
      my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
      break;
    }
  }
  thd->sent_row_count++;
  /* NOTE(review): vio_ok() false presumably means no client connection
     (e.g. internal execution) — nothing to write; confirm against callers. */
  if (!thd->vio_ok())
    DBUG_RETURN(0);
  if (!thd->net.report_error)
    DBUG_RETURN(protocol->write());
  protocol->remove_last_row();			// Error pending: drop the packed row
  DBUG_RETURN(1);
}
|
|
|
|
|
|
|
|
/*
  Finish the result set: flush locks, then send the EOF packet if no
  error is pending. Returns 0 on success, 1 if an error was pending.
*/
bool select_send::send_eof()
{
  /* We may be passing the control from mysqld to the client: release the
     InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
     by thd */
  ha_release_temporary_latches(thd);

  /* Unlock tables before sending packet to gain some speed */
  if (thd->lock)
  {
    mysql_unlock_tables(thd, thd->lock);
    thd->lock=0;
  }
  if (!thd->net.report_error)
  {
    ::send_eof(thd);
    status= 0;      // Data set cleanly terminated; abort() need not act
    return 0;
  }
  else
    return 1;
}
|
|
|
|
|
|
|
|
|
2004-02-05 10:22:08 +01:00
|
|
|
/************************************************************************
|
|
|
|
Handling writing to file
|
|
|
|
************************************************************************/
|
2000-07-31 21:29:14 +02:00
|
|
|
|
2004-02-05 10:22:08 +01:00
|
|
|
/*
  Report an error during SELECT ... INTO OUTFILE/DUMPFILE and discard
  the partially written output file.
*/
void select_to_file::send_error(uint errcode,const char *err)
{
  my_message(errcode, err, MYF(0));
  /*
    Fix: test with >= 0, not > 0. File descriptor 0 is a valid descriptor,
    and both cleanup() and the destructor already treat any non-negative
    value as an open file; '> 0' could leak the cache/descriptor and leave
    a stale dump file behind.
  */
  if (file >= 0)
  {
    (void) end_io_cache(&cache);
    (void) my_close(file,MYF(0));
    (void) my_delete(path,MYF(0));		// Delete file on error
    file= -1;
  }
}
|
2000-07-31 21:29:14 +02:00
|
|
|
|
|
|
|
|
2004-08-24 18:17:11 +02:00
|
|
|
bool select_to_file::send_eof()
|
|
|
|
{
|
|
|
|
int error= test(end_io_cache(&cache));
|
|
|
|
if (my_close(file,MYF(MY_WME)))
|
|
|
|
error= 1;
|
|
|
|
if (!error)
|
|
|
|
::send_ok(thd,row_count);
|
|
|
|
file= -1;
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Reset per-execution state so the sink can be reused by a later
  execution of the same (prepared) statement.
*/
void select_to_file::cleanup()
{
  /* In case of error send_eof() may be not called: close the file here. */
  if (file >= 0)
  {
    (void) end_io_cache(&cache);
    (void) my_close(file,MYF(0));
    file= -1;
  }
  path[0]= '\0';
  row_count= 0;
}
|
|
|
|
|
|
|
|
|
2004-02-05 10:22:08 +01:00
|
|
|
/* Last-resort release of the output file if neither send_eof() nor
   cleanup() got to run. */
select_to_file::~select_to_file()
{
  if (file >= 0)
  {					// This only happens in case of error
    (void) end_io_cache(&cache);
    (void) my_close(file,MYF(0));
    file= -1;
  }
}
|
|
|
|
|
|
|
|
/***************************************************************************
|
|
|
|
** Export of select to textfile
|
|
|
|
***************************************************************************/
|
|
|
|
|
|
|
|
/* Propagate the number of exported rows into the session row counter. */
select_export::~select_export()
{
  thd->sent_row_count= row_count;
}
|
|
|
|
|
Fixed Bug#2123, mysqld segmentation faulted when it tried to
open a file that already existed. The problem was that end_io_cache()
was called even if init_io_cache() was not. This affected both
OUTFILE and DUMPFILE (both fixed). Sometimes wrongly aligned pointer was freed,
sometimes mysqld core dumped.
Other problem was that select_dump::send_error removed the dumpfile,
even if it was created by an earlier run, or by some other program, if
the file permissions just permitted it. Fixed it so that the file will
only be deleted, if an error occurred, but the file was created by mysqld
just a moment ago, in that thread.
On the other hand, select_export did not handle the corresponding garbage
file at all. Both fixed.
After these fixes, a big part of the select_export::prepare and select_dump::prepare
code became identical. Merged the code into a new function called create_file(),
which is now called by the two latter functions.
Regards,
Jani
2004-01-15 05:48:31 +01:00
|
|
|
|
2004-02-05 10:22:08 +01:00
|
|
|
/*
|
|
|
|
Create file with IO cache
|
|
|
|
|
|
|
|
SYNOPSIS
|
|
|
|
create_file()
|
|
|
|
thd Thread handle
|
|
|
|
path File name
|
|
|
|
      exchange	Exchange class (output file format description)
|
|
|
|
cache IO cache
|
|
|
|
|
|
|
|
RETURN
|
|
|
|
>= 0 File handle
|
|
|
|
-1 Error
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
static File create_file(THD *thd, char *path, sql_exchange *exchange,
		        IO_CACHE *cache)
{
  File file;
  uint option= MY_UNPACK_FILENAME | MY_RELATIVE_PATH;

#ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS
  option|= MY_REPLACE_DIR;			// Force use of db directory
#endif

  /* A bare file name is resolved relative to the current database's
     directory under the data home. */
  if (!dirname_length(exchange->file_name))
  {
    strxnmov(path, FN_REFLEN, mysql_real_data_home, thd->db ? thd->db : "", NullS);
    (void) fn_format(path, exchange->file_name, path, "", option);
  }
  else
    (void) fn_format(path, exchange->file_name, mysql_real_data_home, "", option);

  if (opt_secure_file_priv &&
      strncmp(opt_secure_file_priv, path, strlen(opt_secure_file_priv)))
  {
    /* Write only allowed to dir or subdir specified by secure_file_priv */
    my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv");
    return -1;
  }

  /* Refuse to clobber an existing file (O_EXCL below enforces it too). */
  if (!access(path, F_OK))
  {
    my_error(ER_FILE_EXISTS_ERROR, MYF(0), exchange->file_name);
    return -1;
  }
  /* Create the file world readable */
  if ((file= my_create(path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0)
    return file;
#ifdef HAVE_FCHMOD
  (void) fchmod(file, 0666);			// Because of umask()
#else
  (void) chmod(path, 0666);
#endif
  if (init_io_cache(cache, file, 0L, WRITE_CACHE, 0L, 1, MYF(MY_WME)))
  {
    my_close(file, MYF(0));
    my_delete(path, MYF(0));		// Delete file on error, it was just created
    return -1;
  }
  return file;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Prepare SELECT ... INTO OUTFILE: open the target file and precompute
  the separator/escape characters used by send_data().
  Returns 0 on success, 1 if the file could not be created.
*/
int
select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
  bool blob_flag=0;
  unit= u;
  /* Keep the name within FN_REFLEN; a too-long name is truncated here. */
  if ((uint) strlen(exchange->file_name) + NAME_LEN >= FN_REFLEN)
    strmake(path,exchange->file_name,FN_REFLEN-1);

  if ((file= create_file(thd, path, exchange, &cache)) < 0)
    return 1;
  /* Check if there is any blobs in data */
  {
    List_iterator_fast<Item> li(list);
    Item *item;
    while ((item=li++))
    {
      if (item->max_length >= MAX_BLOB_WIDTH)
      {
        blob_flag=1;
        break;
      }
    }
  }
  field_term_length=exchange->field_term->length();
  if (!exchange->line_term->length())
    exchange->line_term=exchange->field_term;	// Use this if it exists
  /* First character of each separator is what send_data() must escape;
     INT_MAX / -1 mean "no such character". */
  field_sep_char= (exchange->enclosed->length() ? (*exchange->enclosed)[0] :
                   field_term_length ? (*exchange->field_term)[0] : INT_MAX);
  escape_char= (exchange->escaped->length() ? (*exchange->escaped)[0] : -1);
  line_sep_char= (exchange->line_term->length() ?
                  (*exchange->line_term)[0] : INT_MAX);
  if (!field_term_length)
    exchange->opt_enclosed=0;
  if (!exchange->enclosed->length())
    exchange->opt_enclosed=1;			// A little quicker loop
  /* Fixed-size rows are only possible with no separators and no blobs. */
  fixed_row_size= (!field_term_length && !exchange->enclosed->length() &&
                   !blob_flag);
  return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Write one row in OUTFILE format: line_start, then each column value
  (optionally enclosed, escaped, and space-padded for fixed-size rows)
  separated by field_term, then line_term.
  Returns 0 on success, 1 on a write error.
*/
bool select_export::send_data(List<Item> &items)
{

  DBUG_ENTER("select_export::send_data");
  char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH];
  bool space_inited=0;
  String tmp(buff,sizeof(buff),&my_charset_bin),*res;
  tmp.length(0);

  if (unit->offset_limit_cnt)
  {						// using limit offset,count
    unit->offset_limit_cnt--;
    DBUG_RETURN(0);
  }
  row_count++;
  Item *item;
  uint used_length=0,items_left=items.elements;
  List_iterator_fast<Item> li(items);

  if (my_b_write(&cache,(byte*) exchange->line_start->ptr(),
                 exchange->line_start->length()))
    goto err;
  while ((item=li++))
  {
    Item_result result_type=item->result_type();
    res=item->str_result(&tmp);
    /* Opening ENCLOSED BY string (only strings when OPTIONALLY is set). */
    if (res && (!exchange->opt_enclosed || result_type == STRING_RESULT))
    {
      if (my_b_write(&cache,(byte*) exchange->enclosed->ptr(),
                     exchange->enclosed->length()))
        goto err;
    }
    if (!res)
    {						// NULL
      if (!fixed_row_size)
      {
        if (escape_char != -1)			// Use \N syntax
        {
          null_buff[0]=escape_char;
          null_buff[1]='N';
          if (my_b_write(&cache,(byte*) null_buff,2))
            goto err;
        }
        else if (my_b_write(&cache,(byte*) "NULL",4))
          goto err;
      }
      else
      {
        used_length=0;				// Fill with space
      }
    }
    else
    {
      if (fixed_row_size)
        used_length=min(res->length(),item->max_length);
      else
        used_length=res->length();
      if (result_type == STRING_RESULT && escape_char != -1)
      {
        char *pos,*start,*end;

        /* Scan the value and emit escape_char before any character that
           would be ambiguous in the output (separators, escape, NUL). */
        for (start=pos=(char*) res->ptr(),end=pos+used_length ;
             pos != end ;
             pos++)
        {
#ifdef USE_MB
          CHARSET_INFO *res_charset=res->charset();
          if (use_mb(res_charset))
          {
            int l;
            /* Skip over multi-byte sequences: their bytes must not be
               mistaken for separator characters. */
            if ((l=my_ismbchar(res_charset, pos, end)))
            {
              pos += l-1;
              continue;
            }
          }
#endif
          if ((int) *pos == escape_char || (int) *pos == field_sep_char ||
              (int) *pos == line_sep_char || !*pos)
          {
            char tmp_buff[2];
            tmp_buff[0]= escape_char;
            tmp_buff[1]= *pos ? *pos : '0';	// NUL is written as escape+'0'
            if (my_b_write(&cache,(byte*) start,(uint) (pos-start)) ||
                my_b_write(&cache,(byte*) tmp_buff,2))
              goto err;
            start=pos+1;
          }
        }
        if (my_b_write(&cache,(byte*) start,(uint) (pos-start)))
          goto err;
      }
      else if (my_b_write(&cache,(byte*) res->ptr(),used_length))
        goto err;
    }
    if (fixed_row_size)
    {						// Fill with space
      if (item->max_length > used_length)
      {
        /* QQ: Fix by adding a my_b_fill() function */
        if (!space_inited)
        {
          space_inited=1;
          bfill(space,sizeof(space),' ');
        }
        uint length=item->max_length-used_length;
        for (; length > sizeof(space) ; length-=sizeof(space))
        {
          if (my_b_write(&cache,(byte*) space,sizeof(space)))
            goto err;
        }
        if (my_b_write(&cache,(byte*) space,length))
          goto err;
      }
    }
    /* Closing ENCLOSED BY string. */
    if (res && (!exchange->opt_enclosed || result_type == STRING_RESULT))
    {
      if (my_b_write(&cache, (byte*) exchange->enclosed->ptr(),
                     exchange->enclosed->length()))
        goto err;
    }
    if (--items_left)
    {
      if (my_b_write(&cache, (byte*) exchange->field_term->ptr(),
                     field_term_length))
        goto err;
    }
  }
  if (my_b_write(&cache,(byte*) exchange->line_term->ptr(),
                 exchange->line_term->length()))
    goto err;
  DBUG_RETURN(0);
err:
  DBUG_RETURN(1);
}
|
|
|
|
|
|
|
|
|
|
|
|
/***************************************************************************
|
|
|
|
** Dump of select to a binary file
|
|
|
|
***************************************************************************/
|
|
|
|
|
|
|
|
|
|
|
|
int
|
2002-05-08 22:14:40 +02:00
|
|
|
select_dump::prepare(List<Item> &list __attribute__((unused)),
|
|
|
|
SELECT_LEX_UNIT *u)
|
2000-07-31 21:29:14 +02:00
|
|
|
{
|
2002-05-08 22:14:40 +02:00
|
|
|
unit= u;
|
2004-02-05 10:22:08 +01:00
|
|
|
return (int) ((file= create_file(thd, path, exchange, &cache)) < 0);
|
2000-07-31 21:29:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Write one row verbatim (no separators, no escaping) to the dump file.
  DUMPFILE output is meant for a single row; extra rows raise
  ER_TOO_MANY_ROWS. Returns 0 on success, 1 on error.
*/
bool select_dump::send_data(List<Item> &items)
{
  List_iterator_fast<Item> li(items);
  char buff[MAX_FIELD_WIDTH];
  String tmp(buff,sizeof(buff),&my_charset_bin),*res;
  tmp.length(0);
  Item *item;
  DBUG_ENTER("select_dump::send_data");

  if (unit->offset_limit_cnt)
  {						// using limit offset,count
    unit->offset_limit_cnt--;
    DBUG_RETURN(0);
  }
  /* NOTE(review): '> 1' lets a second row through before erroring on the
     third — looks like an off-by-one versus the one-row contract of
     DUMPFILE ('>= 1' would reject the second row). Confirm intent before
     changing: callers/tests may depend on the current behavior. */
  if (row_count++ > 1)
  {
    my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
    goto err;
  }
  while ((item=li++))
  {
    res=item->str_result(&tmp);
    if (!res)					// If NULL
    {
      /* NULL is dumped as a single NUL byte. */
      if (my_b_write(&cache,(byte*) "",1))
	goto err;
    }
    else if (my_b_write(&cache,(byte*) res->ptr(),res->length()))
    {
      my_error(ER_ERROR_ON_WRITE, MYF(0), path, my_errno);
      goto err;
    }
  }
  DBUG_RETURN(0);
err:
  DBUG_RETURN(1);
}
|
|
|
|
|
|
|
|
|
2003-11-28 11:18:13 +01:00
|
|
|
/* Remember which subselect Item this result sink feeds its rows into. */
select_subselect::select_subselect(Item_subselect *item_arg)
{
  item= item_arg;
}
|
|
|
|
|
2004-02-05 10:22:08 +01:00
|
|
|
|
2002-12-19 20:15:09 +01:00
|
|
|
/*
  Accept the (single) row of a scalar/row subquery: store each column
  value into the Item_singlerow_subselect. A second row is an error
  (ER_SUBQUERY_NO_1_ROW). Returns 0 on success, 1 on error.
*/
bool select_singlerow_subselect::send_data(List<Item> &items)
{
  DBUG_ENTER("select_singlerow_subselect::send_data");
  Item_singlerow_subselect *it= (Item_singlerow_subselect *)item;
  if (it->assigned())
  {
    /* A value was already stored: the subquery produced more than one row. */
    my_message(ER_SUBQUERY_NO_1_ROW, ER(ER_SUBQUERY_NO_1_ROW), MYF(0));
    DBUG_RETURN(1);
  }
  if (unit->offset_limit_cnt)
  {				          // Using limit offset,count
    unit->offset_limit_cnt--;
    DBUG_RETURN(0);
  }
  List_iterator_fast<Item> li(items);
  Item *val_item;
  for (uint i= 0; (val_item= li++); i++)
    it->store(i, val_item);
  it->assigned(1);
  DBUG_RETURN(0);
}
|
2002-06-19 16:52:44 +02:00
|
|
|
|
2004-02-05 10:22:08 +01:00
|
|
|
|
2005-01-26 14:27:45 +01:00
|
|
|
void select_max_min_finder_subselect::cleanup()
|
|
|
|
{
|
|
|
|
DBUG_ENTER("select_max_min_finder_subselect::cleanup");
|
|
|
|
cache= 0;
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2003-08-12 11:38:03 +02:00
|
|
|
/*
  Incrementally compute MAX()/MIN() over the rows of a transformed
  subquery: keep the current extremum in 'cache' and update the owning
  Item_maxmin_subselect whenever a new candidate wins. The comparison
  member function 'op' is selected lazily from the first value's type.
  Always returns 0.
*/
bool select_max_min_finder_subselect::send_data(List<Item> &items)
{
  DBUG_ENTER("select_max_min_finder_subselect::send_data");
  Item_maxmin_subselect *it= (Item_maxmin_subselect *)item;
  List_iterator_fast<Item> li(items);
  Item *val_item= li++;
  it->register_value();
  if (it->assigned())
  {
    /* Subsequent rows: compare against the stored extremum. */
    cache->store(val_item);
    if ((this->*op)())
      it->store(0, cache);
  }
  else
  {
    /* First row: create the cache and pick the comparison function. */
    if (!cache)
    {
      cache= Item_cache::get_cache(val_item->result_type());
      switch (val_item->result_type())
      {
      case REAL_RESULT:
	op= &select_max_min_finder_subselect::cmp_real;
	break;
      case INT_RESULT:
	op= &select_max_min_finder_subselect::cmp_int;
	break;
      case STRING_RESULT:
	op= &select_max_min_finder_subselect::cmp_str;
	break;
      case DECIMAL_RESULT:
	op= &select_max_min_finder_subselect::cmp_decimal;
	break;
      case ROW_RESULT:
	// This case should never be chosen
	DBUG_ASSERT(0);
	op= 0;
      }
    }
    cache->store(val_item);
    it->store(0, cache);
  }
  it->assigned(1);
  DBUG_RETURN(0);
}
|
|
|
|
|
|
|
|
bool select_max_min_finder_subselect::cmp_real()
|
|
|
|
{
|
2006-12-14 23:51:37 +01:00
|
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
2004-11-11 19:39:35 +01:00
|
|
|
double val1= cache->val_real(), val2= maxmin->val_real();
|
2003-08-12 11:38:03 +02:00
|
|
|
if (fmax)
|
|
|
|
return (cache->null_value && !maxmin->null_value) ||
|
|
|
|
(!cache->null_value && !maxmin->null_value &&
|
|
|
|
val1 > val2);
|
2005-02-19 17:58:27 +01:00
|
|
|
return (maxmin->null_value && !cache->null_value) ||
|
|
|
|
(!cache->null_value && !maxmin->null_value &&
|
|
|
|
val1 < val2);
|
2003-08-12 11:38:03 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool select_max_min_finder_subselect::cmp_int()
|
|
|
|
{
|
2006-12-14 23:51:37 +01:00
|
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
2003-08-12 11:38:03 +02:00
|
|
|
longlong val1= cache->val_int(), val2= maxmin->val_int();
|
|
|
|
if (fmax)
|
|
|
|
return (cache->null_value && !maxmin->null_value) ||
|
|
|
|
(!cache->null_value && !maxmin->null_value &&
|
|
|
|
val1 > val2);
|
2005-02-19 17:58:27 +01:00
|
|
|
return (maxmin->null_value && !cache->null_value) ||
|
|
|
|
(!cache->null_value && !maxmin->null_value &&
|
|
|
|
val1 < val2);
|
2003-08-12 11:38:03 +02:00
|
|
|
}
|
|
|
|
|
2005-02-08 23:50:45 +01:00
|
|
|
bool select_max_min_finder_subselect::cmp_decimal()
|
|
|
|
{
|
2006-12-14 23:51:37 +01:00
|
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
2005-02-08 23:50:45 +01:00
|
|
|
my_decimal cval, *cvalue= cache->val_decimal(&cval);
|
|
|
|
my_decimal mval, *mvalue= maxmin->val_decimal(&mval);
|
|
|
|
if (fmax)
|
|
|
|
return (cache->null_value && !maxmin->null_value) ||
|
|
|
|
(!cache->null_value && !maxmin->null_value &&
|
|
|
|
my_decimal_cmp(cvalue, mvalue) > 0) ;
|
2005-02-19 17:58:27 +01:00
|
|
|
return (maxmin->null_value && !cache->null_value) ||
|
|
|
|
(!cache->null_value && !maxmin->null_value &&
|
|
|
|
my_decimal_cmp(cvalue,mvalue) < 0);
|
2005-02-08 23:50:45 +01:00
|
|
|
}
|
|
|
|
|
2003-08-12 11:38:03 +02:00
|
|
|
bool select_max_min_finder_subselect::cmp_str()
|
|
|
|
{
|
|
|
|
String *val1, *val2, buf1, buf2;
|
2006-12-14 23:51:37 +01:00
|
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
2003-08-12 11:38:03 +02:00
|
|
|
/*
|
|
|
|
as far as both operand is Item_cache buf1 & buf2 will not be used,
|
|
|
|
but added for safety
|
|
|
|
*/
|
|
|
|
val1= cache->val_str(&buf1);
|
|
|
|
val2= maxmin->val_str(&buf1);
|
|
|
|
if (fmax)
|
|
|
|
return (cache->null_value && !maxmin->null_value) ||
|
|
|
|
(!cache->null_value && !maxmin->null_value &&
|
|
|
|
sortcmp(val1, val2, cache->collation.collation) > 0) ;
|
2005-02-19 17:58:27 +01:00
|
|
|
return (maxmin->null_value && !cache->null_value) ||
|
|
|
|
(!cache->null_value && !maxmin->null_value &&
|
|
|
|
sortcmp(val1, val2, cache->collation.collation) < 0);
|
2003-08-12 11:38:03 +02:00
|
|
|
}
|
|
|
|
|
2002-06-19 16:52:44 +02:00
|
|
|
/*
  EXISTS evaluation: any row that survives the OFFSET skip proves
  existence, so set value=1 and mark the Item as assigned.
  Always returns 0.
*/
bool select_exists_subselect::send_data(List<Item> &items)
{
  DBUG_ENTER("select_exists_subselect::send_data");
  Item_exists_subselect *it= (Item_exists_subselect *)item;
  if (unit->offset_limit_cnt)
  {					  // Using limit offset,count
    unit->offset_limit_cnt--;
    DBUG_RETURN(0);
  }
  it->value= 1;
  it->assigned(1);
  DBUG_RETURN(0);
}
|
|
|
|
|
2002-10-11 20:49:10 +02:00
|
|
|
|
|
|
|
/***************************************************************************
|
2004-02-05 10:22:08 +01:00
|
|
|
Dump of select to variables
|
2002-10-11 20:49:10 +02:00
|
|
|
***************************************************************************/
|
2004-02-05 10:22:08 +01:00
|
|
|
|
2002-10-16 15:55:08 +02:00
|
|
|
/*
  Prepare a SELECT ... INTO @var list: remember the unit and verify that
  the number of selected columns matches the number of target variables.

  RETURN
    0  ok
    1  column/variable count mismatch (error already reported)
*/
int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
  unit= u;

  /* Every selected column needs exactly one target variable. */
  if (var_list.elements == list.elements)
    return 0;

  my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT,
             ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0));
  return 1;
}
|
2003-03-06 19:16:46 +01:00
|
|
|
|
2003-10-01 16:15:24 +02:00
|
|
|
|
2006-12-01 11:25:06 +01:00
|
|
|
/*
  SELECT ... INTO @var cannot be used as a cursor query; always raise
  ER_SP_BAD_CURSOR_SELECT and refuse.
*/
bool select_dumpvar::check_simple_select() const
{
  my_error(ER_SP_BAD_CURSOR_SELECT, MYF(0));
  return TRUE;   /* TRUE = not a simple select, i.e. rejected */
}
|
|
|
|
|
|
|
|
|
2004-08-24 18:17:11 +02:00
|
|
|
/* Reset per-execution state so the object can be reused for the next run. */
void select_dumpvar::cleanup()
{
  /* send_data() rejects more than one row by counting rows here */
  row_count= 0;
}
|
|
|
|
|
|
|
|
|
2005-06-15 19:58:35 +02:00
|
|
|
/*
  Base-class arena type. Concrete arenas (Statement, ...) override this;
  the assert documents that calling the base version is a programming error.
*/
Query_arena::Type Query_arena::type() const
{
  DBUG_ASSERT(0); /* Should never be called */
  return STATEMENT;   /* dummy value for release builds */
}
|
|
|
|
|
|
|
|
|
2005-06-23 18:22:08 +02:00
|
|
|
void Query_arena::free_items()
|
|
|
|
{
|
|
|
|
Item *next;
|
|
|
|
DBUG_ENTER("Query_arena::free_items");
|
|
|
|
/* This works because items are allocated with sql_alloc() */
|
|
|
|
for (; free_list; free_list= next)
|
|
|
|
{
|
|
|
|
next= free_list->next;
|
|
|
|
free_list->delete_self();
|
|
|
|
}
|
|
|
|
/* Postcondition: free_list is 0 */
|
|
|
|
DBUG_VOID_RETURN;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-09-22 00:11:21 +02:00
|
|
|
/*
  Make this arena an alias of 'set': copy the allocation root, the list
  of items to free, and the prepare/execute state. Used for backup/restore
  of the active arena (see THD::set_n_backup_active_arena).
*/
void Query_arena::set_query_arena(Query_arena *set)
{
  state= set->state;
  mem_root= set->mem_root;
  free_list= set->free_list;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Per-statement cleanup hook; must be overridden by arenas that support it.
  The comparison of two distinct string literals below compares their
  addresses and is (in practice) always false, so the assert fires with a
  readable message if this base version is ever reached in a debug build.
*/
void Query_arena::cleanup_stmt()
{
  DBUG_ASSERT("Query_arena::cleanup_stmt()" == "not implemented");
}
|
|
|
|
|
2003-12-20 00:16:10 +01:00
|
|
|
/*
|
|
|
|
Statement functions
|
|
|
|
*/
|
|
|
|
|
A fix for Bug#26750 "valgrind leak in sp_head" (and post-review
fixes).
The legend: on a replication slave, in case a trigger creation
was filtered out because of application of replicate-do-table/
replicate-ignore-table rule, the parsed definition of a trigger was not
cleaned up properly. LEX::sphead member was left around and leaked
memory. Until the actual implementation of support of
replicate-ignore-table rules for triggers by the patch for Bug 24478 it
was never the case that "case SQLCOM_CREATE_TRIGGER"
was not executed once a trigger was parsed,
so the deletion of lex->sphead there worked and the memory did not leak.
The fix:
The real cause of the bug is that there is no 1 or 2 places where
we can clean up the main LEX after parse. And the reason we
can not have just one or two places where we clean up the LEX is
asymmetric behaviour of MYSQLparse in case of success or error.
One of the root causes of this behaviour is the code in Item::Item()
constructor. There, a newly created item adds itself to THD::free_list
- a single-linked list of Items used in a statement. Yuck. This code
is unaware that we may have more than one statement active at a time,
and always assumes that the free_list of the current statement is
located in THD::free_list. One day we need to be able to explicitly
allocate an item in a given Query_arena.
Thus, when parsing a definition of a stored procedure, like
CREATE PROCEDURE p1() BEGIN SELECT a FROM t1; SELECT b FROM t1; END;
we actually need to reset THD::mem_root, THD::free_list and THD::lex
to parse the nested procedure statement (SELECT *).
The actual reset and restore is implemented in semantic actions
attached to sp_proc_stmt grammar rule.
The problem is that in case of a parsing error inside a nested statement
Bison generated parser would abort immediately, without executing the
restore part of the semantic action. This would leave THD in an
in-the-middle-of-parsing state.
This is why we couldn't have had a single place where we clean up the LEX
after MYSQLparse - in case of an error we needed to do a clean up
immediately, in case of success a clean up could have been delayed.
This left the door open for a memory leak.
One of the following possibilities were considered when working on a fix:
- patch the replication logic to do the clean up. Rejected
as breaks module borders, replication code should not need to know the
gory details of clean up procedure after CREATE TRIGGER.
- wrap MYSQLparse with a function that would do a clean up.
Rejected as ideally we should fix the problem when it happens, not
adjust for it outside of the problematic code.
- make sure MYSQLparse cleans up after itself by invoking the clean up
functionality in the appropriate places before return. Implemented in
this patch.
- use %destructor rule for sp_proc_stmt to restore THD - cleaner
than the prevoius approach, but rejected
because needs a careful analysis of the side effects, and this patch is
for 5.0, and long term we need to use the next alternative anyway
- make sure that sp_proc_stmt doesn't juggle with THD - this is a
large work that will affect many modules.
Cleanup: move main_lex and main_mem_root from Statement to its
only two descendants Prepared_statement and THD. This ensures that
when a Statement instance was created for purposes of statement backup,
we do not involve LEX constructor/destructor, which is fairly expensive.
In order to track that the transformation produces equivalent
functionality please check the respective constructors and destructors
of Statement, Prepared_statement and THD - these members were
used only there.
This cleanup is unrelated to the patch.
2007-03-07 10:24:46 +01:00
|
|
|
/*
  Construct a Statement on top of an existing memory root and LEX.

  lex_arg       parse tree owner for this statement (not owned here)
  mem_root_arg  allocation root handed to the Query_arena base
  state_arg     initial prepare/execute lifecycle state
  id_arg        unique statement id (key in Statement_map::st_hash)
*/
Statement::Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg,
                     enum enum_state state_arg, ulong id_arg)
  :Query_arena(mem_root_arg, state_arg),
  id(id_arg),
  set_query_id(1),
  lex(lex_arg),
  query(0),            /* no query text yet */
  query_length(0),
  cursor(0)
{
  /* unnamed until the user PREPAREs it with a name */
  name.str= NULL;
}
|
|
|
|
|
|
|
|
|
2005-06-15 19:58:35 +02:00
|
|
|
/* Identify this arena as a plain statement (see Query_arena::Type). */
Query_arena::Type Statement::type() const
{
  return STATEMENT;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Shallow-copy the statement-identity members from 'stmt' into this object.
  Note: only pointers/scalars are copied; no ownership is transferred.
  Used by set_n_backup_statement()/restore_backup_statement() below.
*/
void Statement::set_statement(Statement *stmt)
{
  id=             stmt->id;
  set_query_id=   stmt->set_query_id;
  lex=            stmt->lex;
  query=          stmt->query;
  query_length=   stmt->query_length;
  cursor=         stmt->cursor;
}
|
|
|
|
|
|
|
|
|
2004-08-21 00:02:46 +02:00
|
|
|
/*
  Save this statement's identity into 'backup', then adopt 'stmt'.
  Pairs with restore_backup_statement() below.
*/
void
Statement::set_n_backup_statement(Statement *stmt, Statement *backup)
{
  DBUG_ENTER("Statement::set_n_backup_statement");
  backup->set_statement(this);   /* remember current state */
  set_statement(stmt);           /* switch to the new statement */
  DBUG_VOID_RETURN;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Undo set_n_backup_statement(): hand the current state back to 'stmt'
  and restore this object's own state from 'backup'.
*/
void Statement::restore_backup_statement(Statement *stmt, Statement *backup)
{
  DBUG_ENTER("Statement::restore_backup_statement");
  stmt->set_statement(this);     /* give possibly-updated state back */
  set_statement(backup);         /* restore the saved identity */
  DBUG_VOID_RETURN;
}
|
|
|
|
|
|
|
|
|
2004-10-14 00:53:59 +02:00
|
|
|
/*
  End-of-statement cleanup for this connection's THD.
  Frees the select_result attached to the LEX and resets the LEX itself;
  memory owned by the arena is deliberately NOT released here.
*/
void THD::end_statement()
{
  /* Cleanup SQL processing state to reuse this statement in next query. */
  lex_end(lex);
  delete lex->result;
  lex->result= 0;
  /* Note that free_list is freed in cleanup_after_query() */

  /*
    Don't free mem_root, as mem_root is freed in the end of dispatch_command
    (once for any command).
  */
}
|
|
|
|
|
|
|
|
|
2005-09-02 15:21:19 +02:00
|
|
|
/*
  Save the THD's currently active arena into 'backup' and activate 'set'.
  Pairs with restore_active_arena(). In debug builds 'backup' is tagged
  so that restore can assert the two calls are correctly paired.
*/
void THD::set_n_backup_active_arena(Query_arena *set, Query_arena *backup)
{
  DBUG_ENTER("THD::set_n_backup_active_arena");
  /* 'backup' must not already hold a saved arena */
  DBUG_ASSERT(backup->is_backup_arena == FALSE);

  backup->set_query_arena(this);
  set_query_arena(set);
#ifndef DBUG_OFF
  backup->is_backup_arena= TRUE;
#endif
  DBUG_VOID_RETURN;
}
|
|
|
|
|
|
|
|
|
2005-09-02 15:21:19 +02:00
|
|
|
/*
  Undo set_n_backup_active_arena(): copy the (possibly updated) active
  arena back into 'set', then reactivate the arena saved in 'backup'.
*/
void THD::restore_active_arena(Query_arena *set, Query_arena *backup)
{
  DBUG_ENTER("THD::restore_active_arena");
  /* verify the backup/restore calls are correctly paired */
  DBUG_ASSERT(backup->is_backup_arena);
  set->set_query_arena(this);
  set_query_arena(backup);
#ifndef DBUG_OFF
  backup->is_backup_arena= FALSE;
#endif
  DBUG_VOID_RETURN;
}
|
|
|
|
|
2003-12-20 00:16:10 +01:00
|
|
|
/*
  Empty by design: Statement owns neither its LEX nor its query text;
  arena memory is released by the owner of the MEM_ROOT.
*/
Statement::~Statement()
{
}
|
|
|
|
|
|
|
|
C_MODE_START
|
|
|
|
|
|
|
|
/*
  HASH callback: key a Statement record by its numeric 'id' member.
  Returns a pointer to the id and stores its size in *key_length.
*/
static byte *
get_statement_id_as_hash_key(const byte *record, uint *key_length,
                             my_bool not_used __attribute__((unused)))
{
  const Statement *stmt= (const Statement *) record;
  *key_length= sizeof(stmt->id);
  return (byte *) &stmt->id;
}
|
|
|
|
|
|
|
|
/* HASH free callback: st_hash owns its Statements and deletes them here. */
static void delete_statement_as_hash_key(void *key)
{
  delete (Statement *) key;
}
|
|
|
|
|
2004-04-30 18:08:38 +02:00
|
|
|
/* HASH callback: key a Statement by its user-visible name (names_hash). */
static byte *get_stmt_name_hash_key(Statement *entry, uint *length,
                                    my_bool not_used __attribute__((unused)))
{
  *length=(uint) entry->name.length;
  return (byte*) entry->name.str;
}
|
|
|
|
|
2003-12-20 00:16:10 +01:00
|
|
|
C_MODE_END
|
|
|
|
|
|
|
|
/*
  Per-connection map of prepared statements, addressable by numeric id
  (st_hash, owning) and optionally by name (names_hash, non-owning).
*/
Statement_map::Statement_map() :
  last_found_statement(0)   /* one-entry lookup cache, see find() users */
{
  enum
  {
    START_STMT_HASH_SIZE = 16,
    START_NAME_HASH_SIZE = 16
  };
  /* st_hash owns the statements: delete callback frees them */
  hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0,
            get_statement_id_as_hash_key,
            delete_statement_as_hash_key, MYF(0));
  /* names_hash only references statements already owned by st_hash */
  hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
            (hash_get_key) get_stmt_name_hash_key,
            NULL,MYF(0));
}
|
|
|
|
|
Implement WL#2661 "Prepared Statements: Dynamic SQL in Stored Procedures".
The idea of the patch is to separate statement processing logic,
such as parsing, validation of the parsed tree, execution and cleanup,
from global query processing logic, such as logging, resetting
priorities of a thread, resetting stored procedure cache, resetting
thread count of errors and warnings.
This makes PREPARE and EXECUTE behave similarly to the rest of SQL
statements and allows their use in stored procedures.
This patch contains a change in behaviour:
until recently for each SQL prepared statement command, 2 queries
were written to the general log, e.g.
[Query] prepare stmt from @stmt_text;
[Prepare] select * from t1 <-- contents of @stmt_text
The chagne was necessary to prevent [Prepare] commands from being written
to the general log when executing a stored procedure with Dynamic SQL.
We should consider whether the old behavior is preferrable and probably
restore it.
This patch refixes Bug#7115, Bug#10975 (partially), Bug#10605 (various bugs
in Dynamic SQL reported before it was disabled).
2005-09-03 01:13:18 +02:00
|
|
|
|
2006-04-07 21:37:06 +02:00
|
|
|
/*
|
|
|
|
Insert a new statement to the thread-local statement map.
|
|
|
|
|
|
|
|
DESCRIPTION
|
|
|
|
If there was an old statement with the same name, replace it with the
|
|
|
|
new one. Otherwise, check if max_prepared_stmt_count is not reached yet,
|
|
|
|
increase prepared_stmt_count, and insert the new statement. It's okay
|
|
|
|
to delete an old statement and fail to insert the new one.
|
|
|
|
|
|
|
|
POSTCONDITIONS
|
|
|
|
All named prepared statements are also present in names_hash.
|
|
|
|
Statement names in names_hash are unique.
|
|
|
|
The statement is added only if prepared_stmt_count < max_prepard_stmt_count
|
|
|
|
last_found_statement always points to a valid statement or is 0
|
|
|
|
|
|
|
|
RETURN VALUE
|
|
|
|
0 success
|
|
|
|
1 error: out of resources or max_prepared_stmt_count limit has been
|
|
|
|
reached. An error is sent to the client, the statement is deleted.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
  Insert 'statement' into st_hash (always) and names_hash (if named),
  then account it against the global max_prepared_stmt_count limit.
  On any failure the statement is fully unwound and deleted via the
  goto ladder below; an error has been sent to the client.
  See the contract comment above this function.
*/
int Statement_map::insert(THD *thd, Statement *statement)
{
  if (my_hash_insert(&st_hash, (byte*) statement))
  {
    /*
      Delete is needed only in case of an insert failure. In all other
      cases hash_delete will also delete the statement.
    */
    delete statement;
    my_error(ER_OUT_OF_RESOURCES, MYF(0));
    goto err_st_hash;
  }
  if (statement->name.str && my_hash_insert(&names_hash, (byte*) statement))
  {
    my_error(ER_OUT_OF_RESOURCES, MYF(0));
    goto err_names_hash;
  }
  pthread_mutex_lock(&LOCK_prepared_stmt_count);
  /*
    We don't check that prepared_stmt_count is <= max_prepared_stmt_count
    because we would like to allow to lower the total limit
    of prepared statements below the current count. In that case
    no new statements can be added until prepared_stmt_count drops below
    the limit.
  */
  if (prepared_stmt_count >= max_prepared_stmt_count)
  {
    pthread_mutex_unlock(&LOCK_prepared_stmt_count);
    my_error(ER_MAX_PREPARED_STMT_COUNT_REACHED, MYF(0),
             max_prepared_stmt_count);
    goto err_max;
  }
  prepared_stmt_count++;
  pthread_mutex_unlock(&LOCK_prepared_stmt_count);

  last_found_statement= statement;
  return 0;

  /* Unwind in reverse order of the insertions above. */
err_max:
  if (statement->name.str)
    hash_delete(&names_hash, (byte*) statement);
err_names_hash:
  /* this hash_delete also deletes 'statement' (owning hash) */
  hash_delete(&st_hash, (byte*) statement);
err_st_hash:
  return 1;
}
|
|
|
|
|
2004-04-12 23:58:48 +02:00
|
|
|
|
2005-07-19 20:21:12 +02:00
|
|
|
/*
  Close all transient cursors of this connection.
  Currently compiled out: the transient cursor list is not implemented yet.
*/
void Statement_map::close_transient_cursors()
{
#ifdef TO_BE_IMPLEMENTED
  Statement *stmt;
  while ((stmt= transient_cursor_list.head()))
    stmt->close_cursor(); /* deletes itself from the list */
#endif
}
|
|
|
|
|
|
|
|
|
2006-04-07 21:37:06 +02:00
|
|
|
/*
  Remove 'statement' from both hashes and decrement the global prepared
  statement count. The st_hash delete callback frees the statement itself.
*/
void Statement_map::erase(Statement *statement)
{
  /* invalidate the one-entry lookup cache if it points at the victim */
  if (statement == last_found_statement)
    last_found_statement= 0;
  if (statement->name.str)
    hash_delete(&names_hash, (byte *) statement);

  /* owning hash: this also deletes 'statement' */
  hash_delete(&st_hash, (byte *) statement);
  pthread_mutex_lock(&LOCK_prepared_stmt_count);
  DBUG_ASSERT(prepared_stmt_count > 0);
  prepared_stmt_count--;
  pthread_mutex_unlock(&LOCK_prepared_stmt_count);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Drop every statement in the map (e.g. on CHANGE USER), keeping the
  global prepared_stmt_count consistent.
*/
void Statement_map::reset()
{
  /* Must be first, hash_free will reset st_hash.records */
  pthread_mutex_lock(&LOCK_prepared_stmt_count);
  DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
  prepared_stmt_count-= st_hash.records;
  pthread_mutex_unlock(&LOCK_prepared_stmt_count);

  /* st_hash owns the statements, so resetting it frees them */
  my_hash_reset(&names_hash);
  my_hash_reset(&st_hash);
  last_found_statement= 0;
}
|
|
|
|
|
2004-04-12 23:58:48 +02:00
|
|
|
|
2006-04-07 21:37:06 +02:00
|
|
|
/*
  Destroy the map: subtract the remaining statements from the global
  count before hash_free() wipes st_hash.records, then free both hashes.
*/
Statement_map::~Statement_map()
{
  /* Must go first, hash_free will reset st_hash.records */
  pthread_mutex_lock(&LOCK_prepared_stmt_count);
  DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
  prepared_stmt_count-= st_hash.records;
  pthread_mutex_unlock(&LOCK_prepared_stmt_count);

  /* freeing st_hash also deletes the statements it owns */
  hash_free(&names_hash);
  hash_free(&st_hash);
}
|
|
|
|
|
2002-10-16 15:55:08 +02:00
|
|
|
/*
  Store one result row of SELECT ... INTO @var / INTO sp_var into the
  target variables. Only a single row is permitted; a second row raises
  ER_TOO_MANY_ROWS.

  RETURN
    0  row stored (or skipped by LIMIT offset)
    1  error (too many rows, or storing a variable failed)
*/
bool select_dumpvar::send_data(List<Item> &items)
{
  List_iterator_fast<my_var> var_li(var_list);
  List_iterator<Item> it(items);
  Item *item;
  my_var *mv;
  DBUG_ENTER("select_dumpvar::send_data");

  if (unit->offset_limit_cnt)
  {						// using limit offset,count
    unit->offset_limit_cnt--;
    DBUG_RETURN(0);
  }
  if (row_count++)
  {
    my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
    DBUG_RETURN(1);
  }
  while ((mv= var_li++) && (item= it++))
  {
    if (mv->local)
    {
      /* target is a stored-program local variable */
      if (thd->spcont->set_variable(thd, mv->offset, &item))
        DBUG_RETURN(1);
    }
    else
    {
      /* target is a user variable (@var) */
      Item_func_set_user_var *suv= new Item_func_set_user_var(mv->s, item);
      /*
        Bug fix: the results of new/fix_fields/check/update used to be
        ignored, silently swallowing errors (e.g. out of memory).
      */
      if (suv == NULL || suv->fix_fields(thd, 0) ||
          suv->check(0) || suv->update())
        DBUG_RETURN(1);
    }
  }
  DBUG_RETURN(0);
}
|
|
|
|
|
|
|
|
bool select_dumpvar::send_eof()
|
|
|
|
{
|
2003-11-19 11:26:18 +01:00
|
|
|
if (! row_count)
|
2004-11-24 17:22:29 +01:00
|
|
|
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
|
|
|
ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA));
|
2003-11-19 11:26:18 +01:00
|
|
|
::send_ok(thd,row_count);
|
|
|
|
return 0;
|
2002-10-11 20:49:10 +02:00
|
|
|
}
|
2003-11-28 11:18:13 +01:00
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
TMP_TABLE_PARAM
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/*
  Reset all temporary-table creation parameters to their defaults
  before planning a new temporary table.
*/
void TMP_TABLE_PARAM::init()
{
  DBUG_ENTER("TMP_TABLE_PARAM::init");
  DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this));
  field_count= sum_func_count= func_count= hidden_field_count= 0;
  group_parts= group_length= group_null_parts= 0;
  quick_group= 1;              /* assume grouping can be done on the fly */
  table_charset= 0;
  precomputed_group_by= 0;
  DBUG_VOID_RETURN;
}
|
2004-09-13 15:48:01 +02:00
|
|
|
|
|
|
|
|
|
|
|
/*
  Add 'length' to the current thread's outgoing-byte counter.
  current_thd may be 0 when close_connection() calls net_send_error(),
  so the counter update is guarded.
*/
void thd_increment_bytes_sent(ulong length)
{
  THD *thd= current_thd;
  if (thd == 0)
    return; /* current_thd==0 when close_connection() calls net_send_error() */
  thd->status_var.bytes_sent+= length;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Add 'length' to the current thread's incoming-byte counter.
  Consistency fix: guard against current_thd == 0 exactly like
  thd_increment_bytes_sent() does. NOTE(review): confirm whether the
  receive path can actually run without a current THD.
*/
void thd_increment_bytes_received(ulong length)
{
  THD *thd= current_thd;
  if (likely(thd != 0))
    thd->status_var.bytes_received+= length;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Add 'length' to the current thread's big-packet counter.
  Consistency fix: guard against current_thd == 0 exactly like
  thd_increment_bytes_sent() does. NOTE(review): confirm whether this
  path can actually run without a current THD.
*/
void thd_increment_net_big_packet_count(ulong length)
{
  THD *thd= current_thd;
  if (likely(thd != 0))
    thd->status_var.net_big_packet_count+= length;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Zero all per-thread status counters (SHOW STATUS variables). */
void THD::set_status_var_init()
{
  bzero((char*) &status_var, sizeof(status_var));
}
|
2005-07-13 11:48:13 +02:00
|
|
|
|
2005-09-15 21:29:07 +02:00
|
|
|
|
2005-09-20 20:20:38 +02:00
|
|
|
/*
  Initialize a security context to the "not yet authenticated" state:
  no identity strings, no database access rights.
*/
void Security_context::init()
{
  host= user= priv_user= ip= 0;
  host_or_ip= "connecting host";   /* shown before the peer is known */
  priv_host[0]= '\0';
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  db_access= NO_ACCESS;
#endif
}
|
|
|
|
|
|
|
|
|
2005-09-20 20:20:38 +02:00
|
|
|
/*
  Free the heap-allocated identity strings. host/user may alias the
  shared constants my_localhost/delayed_user, which must not be freed.
*/
void Security_context::destroy()
{
  // If not pointer to constant
  if (host != my_localhost)
    safeFree(host);
  if (user != delayed_user)
    safeFree(user);
  safeFree(ip);
}
|
|
|
|
|
|
|
|
|
2005-09-20 20:20:38 +02:00
|
|
|
/*
  Put the context into "all privileges" mode, used when grant checking
  is disabled (e.g. internal threads, --skip-grant-tables).
*/
void Security_context::skip_grants()
{
  /* privileges for the user are unknown everything is allowed */
  host_or_ip= (char *)"";
  master_access= ~NO_ACCESS;   /* every access bit set */
  priv_user= (char *)"";
  *priv_host= '\0';
}
|
|
|
|
|
|
|
|
|
2005-07-13 11:48:13 +02:00
|
|
|
/****************************************************************************
|
|
|
|
Handling of open and locked tables states.
|
|
|
|
|
|
|
|
This is used when we want to open/lock (and then close) some tables when
|
|
|
|
we already have a set of tables open and locked. We use these methods for
|
|
|
|
access to mysql.proc table to find definitions of stored routines.
|
|
|
|
****************************************************************************/
|
|
|
|
|
2005-08-08 15:46:06 +02:00
|
|
|
/*
  Save the current open/locked tables state into 'backup' and reset it,
  so that additional tables (e.g. mysql.proc) can be opened and locked
  while the original set stays open. Pairs with
  restore_backup_open_tables_state().
*/
void THD::reset_n_backup_open_tables_state(Open_tables_state *backup)
{
  DBUG_ENTER("reset_n_backup_open_tables_state");
  backup->set_open_tables_state(this);
  reset_open_tables_state();
  DBUG_VOID_RETURN;
}
|
|
|
|
|
|
|
|
|
2005-08-08 15:46:06 +02:00
|
|
|
/*
  Restore the open/locked tables state saved by
  reset_n_backup_open_tables_state(). The temporary state must have been
  fully cleaned up first, which the assert verifies.
*/
void THD::restore_backup_open_tables_state(Open_tables_state *backup)
{
  DBUG_ENTER("restore_backup_open_tables_state");
  /*
    Before we will throw away current open tables state we want
    to be sure that it was properly cleaned up.
  */
  DBUG_ASSERT(open_tables == 0 && temporary_tables == 0 &&
              handler_tables == 0 && derived_tables == 0 &&
              lock == 0 && locked_tables == 0 &&
              prelocked_mode == NON_PRELOCKED);
  set_open_tables_state(backup);
  DBUG_VOID_RETURN;
}
|
2005-08-15 17:15:12 +02:00
|
|
|
|
|
|
|
|
2005-08-15 17:35:48 +02:00
|
|
|
|
2005-08-15 17:15:12 +02:00
|
|
|
/****************************************************************************
|
|
|
|
Handling of statement states in functions and triggers.
|
|
|
|
|
|
|
|
This is used to ensure that the function/trigger gets a clean state
|
|
|
|
to work with and does not cause any side effects of the calling statement.
|
|
|
|
|
|
|
|
It also allows most stored functions and triggers to replicate even
|
|
|
|
if they are used items that would normally be stored in the binary
|
|
|
|
replication (like last_insert_id() etc...)
|
|
|
|
|
|
|
|
The following things is done
|
|
|
|
- Disable binary logging for the duration of the statement
|
|
|
|
- Disable multi-result-sets for the duration of the statement
|
2006-04-21 16:55:04 +02:00
|
|
|
- Value of last_insert_id() is saved and restored
|
2005-08-15 17:15:12 +02:00
|
|
|
- Value set by 'SET INSERT_ID=#' is reset and restored
|
|
|
|
- Value for found_rows() is reset and restored
|
|
|
|
- examined_row_count is added to the total
|
|
|
|
- cuted_fields is added to the total
|
2005-11-19 13:09:23 +01:00
|
|
|
- new savepoint level is created and destroyed
|
2005-08-15 17:15:12 +02:00
|
|
|
|
|
|
|
NOTES:
|
|
|
|
Seed for random() is saved for the first! usage of RAND()
|
|
|
|
We reset examined_row_count and cuted_fields and add these to the
|
|
|
|
result to ensure that if we have a bug that would reset these within
|
|
|
|
a function, we are not loosing any rows from the main statement.
|
2006-04-21 16:55:04 +02:00
|
|
|
|
|
|
|
We do not reset value of last_insert_id().
|
2005-08-15 17:15:12 +02:00
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/*
  Save the current statement state into 'backup' and give the upcoming
  sub-statement (stored function or trigger) a clean state to work with.

  SYNOPSIS
    reset_sub_statement_state()
      backup     Out: storage for the current state; handed back later to
                 restore_sub_statement_state()
      new_state  SUB_STMT_* flag(s) OR-ed into in_sub_stmt to mark what
                 kind of sub-statement we are entering

  NOTE
    The order below matters: all fields are saved into 'backup' first,
    because 'options' and 'client_capabilities' are modified afterwards.
*/
void THD::reset_sub_statement_state(Sub_statement_state *backup,
                                    uint new_state)
{
  backup->options= options;
  backup->in_sub_stmt= in_sub_stmt;
  backup->no_send_ok= net.no_send_ok;
  backup->enable_slow_log= enable_slow_log;
  backup->last_insert_id= last_insert_id;
  backup->next_insert_id= next_insert_id;
  backup->current_insert_id= current_insert_id;
  backup->insert_id_used= insert_id_used;
  backup->last_insert_id_used= last_insert_id_used;
  backup->clear_next_insert_id= clear_next_insert_id;
  backup->limit_found_rows= limit_found_rows;
  backup->examined_row_count= examined_row_count;
  backup->sent_row_count= sent_row_count;
  backup->cuted_fields= cuted_fields;
  backup->client_capabilities= client_capabilities;
  backup->savepoints= transaction.savepoints;

  /*
    Disable binary logging for the sub-statement unless the top-level
    statement is an updating one that pre-locks tables (in which case the
    function/trigger events are logged as part of the main statement).
  */
  if (!lex->requires_prelocking() || is_update_query(lex->sql_command))
    options&= ~OPTION_BIN_LOG;

  /* Collect binlog events of the sub-statement into one "union" event */
  if ((backup->options & OPTION_BIN_LOG) && is_update_query(lex->sql_command))
    mysql_bin_log.start_union_events(this, this->query_id);

  /* Disable result sets */
  client_capabilities &= ~CLIENT_MULTI_RESULTS;
  in_sub_stmt|= new_state;
  next_insert_id= 0;
  insert_id_used= 0;
  /*
    Reset the per-statement counters; restore_sub_statement_state() adds
    them back to the caller's totals.
  */
  examined_row_count= 0;
  sent_row_count= 0;
  cuted_fields= 0;
  /* Start a new savepoint level for the function/trigger */
  transaction.savepoints= 0;

  /* Suppress OK packets in case if we will execute statements */
  net.no_send_ok= TRUE;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
  Restore the statement state saved by reset_sub_statement_state().

  SYNOPSIS
    restore_sub_statement_state()
      backup  State previously filled in by reset_sub_statement_state()

  NOTE
    examined_row_count and cuted_fields are not simply restored: the
    sub-statement's counts are added to the caller's saved totals, so the
    main statement's complexity figures include the work done here.
*/
void THD::restore_sub_statement_state(Sub_statement_state *backup)
{
  /*
    To save resources we want to release savepoints which were created
    during execution of function or trigger before leaving their savepoint
    level. It is enough to release first savepoint set on this level since
    all later savepoints will be released automatically.
  */
  if (transaction.savepoints)
  {
    SAVEPOINT *sv;
    /* Walk to the oldest savepoint of this level (end of the list) */
    for (sv= transaction.savepoints; sv->prev; sv= sv->prev)
    {}
    /* ha_release_savepoint() never returns error. */
    (void)ha_release_savepoint(this, sv);
  }
  /* Return to the caller's savepoint level */
  transaction.savepoints= backup->savepoints;
  options= backup->options;
  in_sub_stmt= backup->in_sub_stmt;
  net.no_send_ok= backup->no_send_ok;
  enable_slow_log= backup->enable_slow_log;
  last_insert_id= backup->last_insert_id;
  next_insert_id= backup->next_insert_id;
  current_insert_id= backup->current_insert_id;
  insert_id_used= backup->insert_id_used;
  last_insert_id_used= backup->last_insert_id_used;
  clear_next_insert_id= backup->clear_next_insert_id;
  limit_found_rows= backup->limit_found_rows;
  sent_row_count= backup->sent_row_count;
  client_capabilities= backup->client_capabilities;

  /* Close the binlog "union" opened by reset_sub_statement_state() */
  if ((options & OPTION_BIN_LOG) && is_update_query(lex->sql_command))
    mysql_bin_log.stop_union_events(this);

  /*
    The following is added to the old values as we are interested in the
    total complexity of the query
  */
  examined_row_count+= backup->examined_row_count;
  cuted_fields+= backup->cuted_fields;
}
|
2005-08-15 17:35:48 +02:00
|
|
|
|
|
|
|
|
|
|
|
/***************************************************************************
|
|
|
|
Handling of XA id cacheing
|
|
|
|
***************************************************************************/
|
|
|
|
|
2005-08-12 21:15:01 +02:00
|
|
|
/* Mutex serializing all accesses to the xid_cache hash below */
pthread_mutex_t LOCK_xid_cache;
/* Hash of XID_STATE entries keyed by XID; see xid_get_hash_key() */
HASH xid_cache;
|
|
|
|
|
|
|
|
/*
  HASH callback: extract the key (the XID) and its length from an
  XID_STATE element stored in xid_cache.
*/
static byte *xid_get_hash_key(const byte *ptr,uint *length,
                              my_bool not_used __attribute__((unused)))
{
  XID_STATE *xs= (XID_STATE*)ptr;
  *length= xs->xid.key_length();
  return xs->xid.key();
}
|
|
|
|
|
|
|
|
static void xid_free_hash (void *ptr)
|
|
|
|
{
|
|
|
|
if (!((XID_STATE*)ptr)->in_thd)
|
2005-08-25 23:24:43 +02:00
|
|
|
my_free((gptr)ptr, MYF(0));
|
2005-08-12 21:15:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool xid_cache_init()
|
|
|
|
{
|
|
|
|
pthread_mutex_init(&LOCK_xid_cache, MY_MUTEX_INIT_FAST);
|
2005-08-14 23:20:06 +02:00
|
|
|
return hash_init(&xid_cache, &my_charset_bin, 100, 0, 0,
|
|
|
|
xid_get_hash_key, xid_free_hash, 0) != 0;
|
2005-08-12 21:15:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void xid_cache_free()
|
|
|
|
{
|
|
|
|
if (hash_inited(&xid_cache))
|
|
|
|
{
|
|
|
|
hash_free(&xid_cache);
|
|
|
|
pthread_mutex_destroy(&LOCK_xid_cache);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
  Look up an XID in the global cache.

  RETURN
    Pointer to the matching XID_STATE, or NULL if not present.
*/
XID_STATE *xid_cache_search(XID *xid)
{
  XID_STATE *res;
  pthread_mutex_lock(&LOCK_xid_cache);
  res= (XID_STATE *)hash_search(&xid_cache, xid->key(), xid->key_length());
  pthread_mutex_unlock(&LOCK_xid_cache);
  return res;
}
|
|
|
|
|
2005-08-15 17:35:48 +02:00
|
|
|
|
2005-08-12 21:15:01 +02:00
|
|
|
/*
  Insert a new standalone (not THD-owned) XID_STATE into the cache.

  SYNOPSIS
    xid_cache_insert()
      xid       XID to insert
      xa_state  initial XA state for the new entry

  RETURN
    FALSE  ok, or an entry with this XID already exists (left untouched)
    TRUE   out of memory or hash insertion failure

  NOTE
    Fixed a memory leak: if my_hash_insert() failed, the freshly
    allocated XID_STATE was never freed (xid_free_hash() only runs for
    elements actually stored in the hash).
*/
bool xid_cache_insert(XID *xid, enum xa_states xa_state)
{
  XID_STATE *xs;
  my_bool res;
  pthread_mutex_lock(&LOCK_xid_cache);
  if (hash_search(&xid_cache, xid->key(), xid->key_length()))
    res=0;
  else if (!(xs=(XID_STATE *)my_malloc(sizeof(*xs), MYF(MY_WME))))
    res=1;
  else
  {
    xs->xa_state=xa_state;
    xs->xid.set(xid);
    xs->in_thd=0;
    if ((res= my_hash_insert(&xid_cache, (byte*)xs)))
      my_free((gptr)xs, MYF(0));        /* don't leak xs on insert failure */
  }
  pthread_mutex_unlock(&LOCK_xid_cache);
  return res;
}
|
|
|
|
|
2005-08-15 17:35:48 +02:00
|
|
|
|
2005-08-12 21:15:01 +02:00
|
|
|
/*
  Insert an already-constructed (typically THD-owned) XID_STATE into the
  cache. The XID must not be present yet.

  RETURN
    FALSE  ok
    TRUE   hash insertion failure
*/
bool xid_cache_insert(XID_STATE *xid_state)
{
  my_bool res;
  pthread_mutex_lock(&LOCK_xid_cache);
  DBUG_ASSERT(hash_search(&xid_cache, xid_state->xid.key(),
                          xid_state->xid.key_length())==0);
  res= my_hash_insert(&xid_cache, (byte*)xid_state);
  pthread_mutex_unlock(&LOCK_xid_cache);
  return res;
}
|
|
|
|
|
2005-08-15 17:35:48 +02:00
|
|
|
|
2005-08-12 21:15:01 +02:00
|
|
|
/*
  Remove an XID_STATE from the cache. For standalone entries
  (in_thd == 0) the element is also freed via xid_free_hash().
*/
void xid_cache_delete(XID_STATE *xid_state)
{
  pthread_mutex_lock(&LOCK_xid_cache);
  hash_delete(&xid_cache, (byte *)xid_state);
  pthread_mutex_unlock(&LOCK_xid_cache);
}
|
|
|
|
|