mirror of https://github.com/MariaDB/server.git
22e793639a
- If USER is given, all threads for that user are signaled.
- If SOFT is used then the KILL will not be sent to the handler. This can be used to not interrupt critical things in the handler like 'REPAIR'.
- Internally added more kill signals. This gives us more information of why a query/connection was killed.
- KILL_SERVER is used when the server is going down. In this case the user gets ER_SHUTDOWN as the reason the connection was killed.
- Changed signals to numbers in correct order, which makes it easier to test how the signal should affect the code.
- New error message ER_CONNECTION_KILLED if a connection was killed by 'KILL CONNECTION'. Before we got error ER_SHUTDOWN.
- Changed names of the unused parameters KILL_QUERY & KILL_CONNCTION to mysql_kill() to not conflict with defines in the server.

include/mysql.h.pp: Updated file
include/mysql_com.h: Changed names of the unused parameters KILL_QUERY & KILL_CONNCTION to mysql_kill() to not conflict with defines in the server
mysql-test/r/kill.result: Added test of KILL USER
mysql-test/suite/rpl/r/rpl_stm_000001.result: Updated error code
mysql-test/suite/rpl/t/rpl_stm_000001.test: Updated error codes
mysql-test/t/flush_read_lock_kill.test: Updated error codes
mysql-test/t/kill.test: Added test of KILL USER
plugin/handler_socket/handlersocket/database.cpp: Removed THD:: from KILL
sql/debug_sync.cc: Removed THD:: from KILL
sql/event_scheduler.cc: Removed THD:: from KILL
sql/filesort.cc: Removed THD:: from KILL
sql/ha_ndbcluster_binlog.cc: Removed THD:: from KILL
sql/handler.cc: Removed THD:: from KILL; simplified code
sql/lex.h: Added new keywords HARD | SOFT
sql/log.cc: Removed THD:: from KILL; added testing of new error ER_CONNECTION_KILLED
sql/log_event.cc: Removed THD:: from KILL; added testing of new error ER_CONNECTION_KILLED
sql/mysql_priv.h: Added new prototypes
sql/mysqld.cc: Removed THD:: from KILL; use KILL_SERVER_HARD signal on shutdown
sql/scheduler.cc: Removed THD:: from KILL; simplified test of whether a connection should be killed
sql/share/errmsg.txt: New error message ER_CONNECTION_KILLED
sql/slave.cc: Removed THD:: from KILL
sql/sp_head.cc: Removed THD:: from KILL
sql/sql_base.cc: Removed THD:: from KILL
sql/sql_cache.cc: Removed THD:: from KILL
sql/sql_class.cc: Removed THD:: from KILL; added killed_errno(); only signal kill to storage engine if HARD bit is set
sql/sql_class.h: Moved KILL options out from THD to make them easier to use in sql_yacc.yy
sql/sql_connect.cc: Removed THD:: from KILL
sql/sql_delete.cc: Removed THD:: from KILL
sql/sql_error.cc: Removed THD:: from KILL
sql/sql_insert.cc: Removed THD:: from KILL; simplified testing of whether a thread is killed
sql/sql_lex.h: Added kill options to st_lex
sql/sql_load.cc: Removed THD:: from KILL
sql/sql_parse.cc: Added kill options to st_lex; simplified and optimized testing of thd->killed at end of query; added support for KILL USER; extended sql_kill() to allow use of more kill signals
sql/sql_repl.cc: Removed THD:: from KILL
sql/sql_show.cc: Removed THD:: from KILL; simplified testing of whether a query/connection was killed
sql/sql_table.cc: Removed THD:: from KILL
sql/sql_update.cc: Removed THD:: from KILL
sql/sql_yacc.yy: Added support for new KILL syntax: KILL [HARD|SOFT] [CONNECTION|QUERY] [ID | USER user_name]
storage/archive/ha_archive.cc: Simplify compilation
storage/maria/ha_maria.cc: Removed THD:: from KILL
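Examples of the new syntax (illustrative only; the thread id 123 and the user name 'app_user' are made-up placeholders):

    KILL SOFT QUERY 123;        -- stop the query without signalling the storage engine (e.g. does not interrupt a running REPAIR)
    KILL HARD CONNECTION 123;   -- terminate the connection, signalling the handler as well
    KILL USER app_user;         -- signal all threads belonging to user 'app_user'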
4490 lines
125 KiB
C++
/* Copyright (C) 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */


/*****************************************************************************
**
**  This file implements classes defined in sql_class.h
**  Especially the classes to handle a result from a select
**
*****************************************************************************/

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation                          // gcc: Class implementation
#endif

#include "mysql_priv.h"
#include "rpl_rli.h"
#include "rpl_filter.h"
#include "rpl_record.h"
#include "slave.h"
#include <my_bitmap.h>
#include "log_event.h"
#include <m_ctype.h>
#include <sys/stat.h>
#include <thr_alarm.h>
#ifdef __WIN__
#include <io.h>
#endif
#include <mysys_err.h>

#include "sp_rcontext.h"
#include "sp_cache.h"
#include "sql_select.h" /* declares create_tmp_table() */
#include "debug_sync.h"
#include "sql_handler.h"

/*
  The following is used to initialise Table_ident with an internal
  table name
*/
char internal_table_name[2]= "*";
char empty_c_string[1]= {0};    /* used for not defined db */

const char * const THD::DEFAULT_WHERE= "field list";

/*****************************************************************************
** Instantiate templates
*****************************************************************************/

#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
/* Used templates */
template class List<Key>;
template class List_iterator<Key>;
template class List<Key_part_spec>;
template class List_iterator<Key_part_spec>;
template class List<Alter_drop>;
template class List_iterator<Alter_drop>;
template class List<Alter_column>;
template class List_iterator<Alter_column>;
#endif

/****************************************************************************
** User variables
****************************************************************************/

extern "C" uchar *get_var_key(user_var_entry *entry, size_t *length,
                              my_bool not_used __attribute__((unused)))
{
  *length= entry->name.length;
  return (uchar*) entry->name.str;
}

extern "C" void free_user_var(user_var_entry *entry)
{
  char *pos= (char*) entry+ALIGN_SIZE(sizeof(*entry));
  if (entry->value && entry->value != pos)
    my_free(entry->value, MYF(0));
  my_free((char*) entry,MYF(0));
}

bool Key_part_spec::operator==(const Key_part_spec& other) const
{
  return length == other.length &&
         !my_strcasecmp(system_charset_info, field_name,
                        other.field_name);
}

/**
  Construct an (almost) deep copy of this key. Only those
  elements that are known to never change are not copied.
  If out of memory, a partial copy is returned and an error is set
  in THD.
*/

Key::Key(const Key &rhs, MEM_ROOT *mem_root)
  :type(rhs.type),
  key_create_info(rhs.key_create_info),
  columns(rhs.columns, mem_root),
  name(rhs.name),
  option_list(rhs.option_list),
  generated(rhs.generated)
{
  list_copy_and_replace_each_value(columns, mem_root);
}

/**
  Construct an (almost) deep copy of this foreign key. Only those
  elements that are known to never change are not copied.
  If out of memory, a partial copy is returned and an error is set
  in THD.
*/

Foreign_key::Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root)
  :Key(rhs),
  ref_table(rhs.ref_table),
  ref_columns(rhs.ref_columns),
  delete_opt(rhs.delete_opt),
  update_opt(rhs.update_opt),
  match_opt(rhs.match_opt)
{
  list_copy_and_replace_each_value(ref_columns, mem_root);
}

/*
  Test if a foreign key (= generated key) is a prefix of the given key
  (ignoring key name, key type and order of columns)

  NOTES:
    This is only used to test if an index for a FOREIGN KEY exists

  IMPLEMENTATION
    We only compare field names

  RETURN
    0   Generated key is a prefix of other key
    1   Not equal
*/

bool foreign_key_prefix(Key *a, Key *b)
{
  /* Ensure that 'a' is the generated key */
  if (a->generated)
  {
    if (b->generated && a->columns.elements > b->columns.elements)
      swap_variables(Key*, a, b);               // Put shorter key in 'a'
  }
  else
  {
    if (!b->generated)
      return TRUE;                              // No foreign key
    swap_variables(Key*, a, b);                 // Put generated key in 'a'
  }

  /* Test if 'a' is a prefix of 'b' */
  if (a->columns.elements > b->columns.elements)
    return TRUE;                                // Can't be prefix

  List_iterator<Key_part_spec> col_it1(a->columns);
  List_iterator<Key_part_spec> col_it2(b->columns);
  const Key_part_spec *col1, *col2;

#ifdef ENABLE_WHEN_INNODB_CAN_HANDLE_SWAPED_FOREIGN_KEY_COLUMNS
  while ((col1= col_it1++))
  {
    bool found= 0;
    col_it2.rewind();
    while ((col2= col_it2++))
    {
      if (*col1 == *col2)
      {
        found= TRUE;
        break;
      }
    }
    if (!found)
      return TRUE;                              // Error
  }
  return FALSE;                                 // Is prefix
#else
  while ((col1= col_it1++))
  {
    col2= col_it2++;
    if (!(*col1 == *col2))
      return TRUE;
  }
  return FALSE;                                 // Is prefix
#endif
}

/*
  @brief
  Check if the foreign key options are compatible with the specification
  of the columns on which the key is created

  @retval
    FALSE   The foreign key options are compatible with key columns
  @retval
    TRUE    Otherwise
*/
bool Foreign_key::validate(List<Create_field> &table_fields)
{
  Create_field *sql_field;
  Key_part_spec *column;
  List_iterator<Key_part_spec> cols(columns);
  List_iterator<Create_field> it(table_fields);
  DBUG_ENTER("Foreign_key::validate");
  while ((column= cols++))
  {
    it.rewind();
    while ((sql_field= it++) &&
           my_strcasecmp(system_charset_info,
                         column->field_name,
                         sql_field->field_name)) {}
    if (!sql_field)
    {
      my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name);
      DBUG_RETURN(TRUE);
    }
    if (type == Key::FOREIGN_KEY && sql_field->vcol_info)
    {
      if (delete_opt == FK_OPTION_SET_NULL)
      {
        my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
                 "ON DELETE SET NULL");
        DBUG_RETURN(TRUE);
      }
      if (update_opt == FK_OPTION_SET_NULL)
      {
        my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
                 "ON UPDATE SET NULL");
        DBUG_RETURN(TRUE);
      }
      if (update_opt == FK_OPTION_CASCADE)
      {
        my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
                 "ON UPDATE CASCADE");
        DBUG_RETURN(TRUE);
      }
    }
  }
  DBUG_RETURN(FALSE);
}

/****************************************************************************
** Thread specific functions
****************************************************************************/

/** Push an error to the error stack and return TRUE for now. */

bool
Reprepare_observer::report_error(THD *thd)
{
  my_error(ER_NEED_REPREPARE, MYF(ME_NO_WARNING_FOR_ERROR|ME_NO_SP_HANDLER));

  m_invalidated= TRUE;

  return TRUE;
}


Open_tables_state::Open_tables_state(ulong version_arg)
  :version(version_arg), state_flags(0U)
{
  reset_open_tables_state();
}

/*
  The following functions form part of the C plugin API
*/

extern "C" int mysql_tmpfile(const char *prefix)
{
  char filename[FN_REFLEN];
  File fd = create_temp_file(filename, mysql_tmpdir, prefix,
#ifdef __WIN__
                             O_BINARY | O_TRUNC | O_SEQUENTIAL |
                             O_SHORT_LIVED |
#endif /* __WIN__ */
                             O_CREAT | O_EXCL | O_RDWR | O_TEMPORARY,
                             MYF(MY_WME));
  if (fd >= 0) {
#ifndef __WIN__
    /*
      This can be removed once the following bug is fixed:
      Bug #28903 create_temp_file() doesn't honor O_TEMPORARY option
                 (file not removed) (Unix)
    */
    unlink(filename);
#endif /* !__WIN__ */
  }

  return fd;
}

extern "C"
int thd_in_lock_tables(const THD *thd)
{
  return test(thd->in_lock_tables);
}


extern "C"
int thd_tablespace_op(const THD *thd)
{
  return test(thd->tablespace_op);
}


extern "C"
const char *set_thd_proc_info(THD *thd, const char *info,
                              const char *calling_function,
                              const char *calling_file,
                              const unsigned int calling_line)
{
  if (!thd)
    thd= current_thd;

  const char *old_info= thd->proc_info;
  DBUG_PRINT("proc_info", ("%s:%d %s", calling_file, calling_line,
                           (info != NULL) ? info : ""));
#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
  thd->profiling.status_change(info, calling_function, calling_file, calling_line);
#endif
  thd->proc_info= info;
  return old_info;
}

extern "C"
void **thd_ha_data(const THD *thd, const struct handlerton *hton)
{
  return (void **) &thd->ha_data[hton->slot].ha_ptr;
}


/**
  Provide a handler data getter to simplify coding
*/
extern "C"
void *thd_get_ha_data(const THD *thd, const struct handlerton *hton)
{
  return *thd_ha_data(thd, hton);
}


/**
  Provide a handler data setter to simplify coding
  @see thd_set_ha_data() definition in plugin.h
*/
extern "C"
void thd_set_ha_data(THD *thd, const struct handlerton *hton,
                     const void *ha_data)
{
  plugin_ref *lock= &thd->ha_data[hton->slot].lock;
  if (ha_data && !*lock)
    *lock= ha_lock_engine(NULL, (handlerton*) hton);
  else if (!ha_data && *lock)
  {
    plugin_unlock(NULL, *lock);
    *lock= NULL;
  }
  *thd_ha_data(thd, hton)= (void*) ha_data;
}


extern "C"
long long thd_test_options(const THD *thd, long long test_options)
{
  return thd->options & test_options;
}

extern "C"
int thd_sql_command(const THD *thd)
{
  return (int) thd->lex->sql_command;
}

extern "C"
int thd_tx_isolation(const THD *thd)
{
  return (int) thd->variables.tx_isolation;
}

extern "C"
void thd_inc_row_count(THD *thd)
{
  thd->row_count++;
}

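/*
  Illustrative sketch (hypothetical, for clarity): how a storage engine
  plugin could use thd_get_ha_data()/thd_set_ha_data() above to keep
  per-connection state.  The struct ha_example_ctx and the handlerton
  pointer example_hton are made-up placeholders.

    struct ha_example_ctx { ulonglong trx_id; };

    static ha_example_ctx *get_ctx(THD *thd, handlerton *example_hton)
    {
      ha_example_ctx *ctx=
        (ha_example_ctx*) thd_get_ha_data(thd, example_hton);
      if (!ctx)
      {
        ctx= new ha_example_ctx();                 // engine-private state
        thd_set_ha_data(thd, example_hton, ctx);   // also pins the plugin
      }
      return ctx;
    }
*/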
/**
|
|
Dumps a text description of a thread, its security context
|
|
(user, host) and the current query.
|
|
|
|
@param thd thread context
|
|
@param buffer pointer to preferred result buffer
|
|
@param length length of buffer
|
|
@param max_query_len how many chars of query to copy (0 for all)
|
|
|
|
@req LOCK_thread_count
|
|
|
|
@note LOCK_thread_count mutex is not necessary when the function is invoked on
|
|
the currently running thread (current_thd) or if the caller in some other
|
|
way guarantees that access to thd->query is serialized.
|
|
|
|
@return Pointer to string
|
|
*/
|
|
|
|
extern "C"
|
|
char *thd_security_context(THD *thd, char *buffer, unsigned int length,
|
|
unsigned int max_query_len)
|
|
{
|
|
String str(buffer, length, &my_charset_latin1);
|
|
const Security_context *sctx= &thd->main_security_ctx;
|
|
char header[64];
|
|
int len;
|
|
/*
|
|
The pointers thd->query and thd->proc_info might change since they are
|
|
being modified concurrently. This is acceptable for proc_info since its
|
|
values don't have to be very accurate and the memory it points to is static,
|
|
but we need to attempt a snapshot on the pointer values to avoid using NULL
|
|
values. The pointer to thd->query however, doesn't point to static memory
|
|
and has to be protected by LOCK_thread_count or risk pointing to
|
|
uninitialized memory.
|
|
*/
|
|
const char *proc_info= thd->proc_info;
|
|
|
|
len= my_snprintf(header, sizeof(header),
|
|
"MySQL thread id %lu, query id %lu",
|
|
thd->thread_id, (ulong) thd->query_id);
|
|
str.length(0);
|
|
str.append(header, len);
|
|
|
|
if (sctx->host)
|
|
{
|
|
str.append(' ');
|
|
str.append(sctx->host);
|
|
}
|
|
|
|
if (sctx->ip)
|
|
{
|
|
str.append(' ');
|
|
str.append(sctx->ip);
|
|
}
|
|
|
|
if (sctx->user)
|
|
{
|
|
str.append(' ');
|
|
str.append(sctx->user);
|
|
}
|
|
|
|
if (proc_info)
|
|
{
|
|
str.append(' ');
|
|
str.append(proc_info);
|
|
}
|
|
|
|
pthread_mutex_lock(&thd->LOCK_thd_data);
|
|
|
|
if (thd->query())
|
|
{
|
|
if (max_query_len < 1)
|
|
len= thd->query_length();
|
|
else
|
|
len= min(thd->query_length(), max_query_len);
|
|
str.append('\n');
|
|
str.append(thd->query(), len);
|
|
}
|
|
|
|
pthread_mutex_unlock(&thd->LOCK_thd_data);
|
|
|
|
if (str.c_ptr_safe() == buffer)
|
|
return buffer;
|
|
|
|
/*
|
|
We have to copy the new string to the destination buffer because the string
|
|
was reallocated to a larger buffer to be able to fit.
|
|
*/
|
|
DBUG_ASSERT(buffer != NULL);
|
|
length= min(str.length(), length-1);
|
|
memcpy(buffer, str.c_ptr_quick(), length);
|
|
/* Make sure that the new string is null terminated */
|
|
buffer[length]= '\0';
|
|
return buffer;
|
|
}
|
|
|
|
|
|
/**
|
|
Implementation of Drop_table_error_handler::handle_error().
|
|
The reason in having this implementation is to silence technical low-level
|
|
warnings during DROP TABLE operation. Currently we don't want to expose
|
|
the following warnings during DROP TABLE:
|
|
- Some of the table files are missing or invalid (the table is going to be
|
|
deleted anyway, so why bother that something was missed);
|
|
- A trigger associated with the table does not have DEFINER (One of the
|
|
MySQL specifics now is that triggers are loaded for the table being
|
|
dropped. So, we may have a warning that trigger does not have DEFINER
|
|
attribute during DROP TABLE operation).
|
|
|
|
@return TRUE if the condition is handled.
|
|
*/
|
|
bool Drop_table_error_handler::handle_error(uint sql_errno,
|
|
const char *message,
|
|
MYSQL_ERROR::enum_warning_level level,
|
|
THD *thd)
|
|
{
|
|
return ((sql_errno == EE_DELETE && my_errno == ENOENT) ||
|
|
sql_errno == ER_TRG_NO_DEFINER);
|
|
}
|
|
|
|
|
|
/**
|
|
Clear this diagnostics area.
|
|
|
|
Normally called at the end of a statement.
|
|
*/
|
|
|
|
void
|
|
Diagnostics_area::reset_diagnostics_area()
|
|
{
|
|
DBUG_ENTER("reset_diagnostics_area");
|
|
#ifdef DBUG_OFF
|
|
can_overwrite_status= FALSE;
|
|
/** Don't take chances in production */
|
|
m_message[0]= '\0';
|
|
m_sql_errno= 0;
|
|
m_server_status= 0;
|
|
m_affected_rows= 0;
|
|
m_last_insert_id= 0;
|
|
m_total_warn_count= 0;
|
|
#endif
|
|
is_sent= FALSE;
|
|
/** Tiny reset in debug mode to see garbage right away */
|
|
m_status= DA_EMPTY;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/**
|
|
Set OK status -- ends commands that do not return a
|
|
result set, e.g. INSERT/UPDATE/DELETE.
|
|
*/
|
|
|
|
void
|
|
Diagnostics_area::set_ok_status(THD *thd, ha_rows affected_rows_arg,
|
|
ulonglong last_insert_id_arg,
|
|
const char *message_arg)
|
|
{
|
|
DBUG_ENTER("set_ok_status");
|
|
DBUG_ASSERT(! is_set());
|
|
/*
|
|
In production, refuse to overwrite an error or a custom response
|
|
with an OK packet.
|
|
*/
|
|
if (is_error() || is_disabled())
|
|
return;
|
|
|
|
m_server_status= thd->server_status;
|
|
m_total_warn_count= thd->total_warn_count;
|
|
m_affected_rows= affected_rows_arg;
|
|
m_last_insert_id= last_insert_id_arg;
|
|
if (message_arg)
|
|
strmake(m_message, message_arg, sizeof(m_message) - 1);
|
|
else
|
|
m_message[0]= '\0';
|
|
m_status= DA_OK;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/**
|
|
Set EOF status.
|
|
*/
|
|
|
|
void
|
|
Diagnostics_area::set_eof_status(THD *thd)
|
|
{
|
|
DBUG_ENTER("set_eof_status");
|
|
/* Only allowed to report eof if has not yet reported an error */
|
|
DBUG_ASSERT(! is_set());
|
|
/*
|
|
In production, refuse to overwrite an error or a custom response
|
|
with an EOF packet.
|
|
*/
|
|
if (is_error() || is_disabled())
|
|
return;
|
|
|
|
m_server_status= thd->server_status;
|
|
/*
|
|
If inside a stored procedure, do not return the total
|
|
number of warnings, since they are not available to the client
|
|
anyway.
|
|
*/
|
|
m_total_warn_count= thd->spcont ? 0 : thd->total_warn_count;
|
|
|
|
m_status= DA_EOF;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
/**
|
|
Set ERROR status.
|
|
*/
|
|
|
|
void
|
|
Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg,
|
|
const char *message_arg)
|
|
{
|
|
DBUG_ENTER("set_error_status");
|
|
/*
|
|
Only allowed to report error if has not yet reported a success
|
|
The only exception is when we flush the message to the client,
|
|
an error can happen during the flush.
|
|
*/
|
|
DBUG_ASSERT(! is_set() || can_overwrite_status);
|
|
#ifdef DBUG_OFF
|
|
/*
|
|
In production, refuse to overwrite a custom response with an
|
|
ERROR packet.
|
|
*/
|
|
if (is_disabled())
|
|
return;
|
|
#endif
|
|
|
|
m_sql_errno= sql_errno_arg;
|
|
strmake(m_message, message_arg, sizeof(m_message)-1);
|
|
|
|
m_status= DA_ERROR;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/**
|
|
Mark the diagnostics area as 'DISABLED'.
|
|
|
|
This is used in rare cases when the COM_ command at hand sends a response
|
|
in a custom format. One example is the query cache, another is
|
|
COM_STMT_PREPARE.
|
|
*/
|
|
|
|
void
|
|
Diagnostics_area::disable_status()
|
|
{
|
|
DBUG_ASSERT(! is_set());
|
|
m_status= DA_DISABLED;
|
|
}
|
|
|
|
|
|
THD::THD()
|
|
:Statement(&main_lex, &main_mem_root, CONVENTIONAL_EXECUTION,
|
|
/* statement id */ 0),
|
|
Open_tables_state(refresh_version), rli_fake(0),
|
|
lock_id(&main_lock_id),
|
|
in_sub_stmt(0),
|
|
sql_log_bin_toplevel(false), log_all_errors(0),
|
|
binlog_table_maps(0), binlog_flags(0UL),
|
|
table_map_for_update(0),
|
|
arg_of_last_insert_id_function(FALSE),
|
|
first_successful_insert_id_in_prev_stmt(0),
|
|
first_successful_insert_id_in_prev_stmt_for_binlog(0),
|
|
first_successful_insert_id_in_cur_stmt(0),
|
|
stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE),
|
|
examined_row_count(0),
|
|
global_read_lock(0),
|
|
global_disable_checkpoint(0),
|
|
is_fatal_error(0),
|
|
transaction_rollback_request(0),
|
|
is_fatal_sub_stmt_error(0),
|
|
rand_used(0),
|
|
time_zone_used(0),
|
|
in_lock_tables(0),
|
|
bootstrap(0),
|
|
derived_tables_processing(FALSE),
|
|
spcont(NULL),
|
|
m_parser_state(NULL)
|
|
#if defined(ENABLED_DEBUG_SYNC)
|
|
, debug_sync_control(0)
|
|
#endif /* defined(ENABLED_DEBUG_SYNC) */
|
|
{
|
|
ulong tmp;
|
|
|
|
/*
|
|
Pass nominal parameters to init_alloc_root only to ensure that
|
|
the destructor works OK in case of an error. The main_mem_root
|
|
will be re-initialized in init_for_queries().
|
|
*/
|
|
init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
|
|
stmt_arena= this;
|
|
thread_stack= 0;
|
|
scheduler= &thread_scheduler; // Will be fixed later
|
|
extra_port= 0;
|
|
catalog= (char*)"std"; // the only catalog we have for now
|
|
main_security_ctx.init();
|
|
security_ctx= &main_security_ctx;
|
|
some_tables_deleted=no_errors=password= 0;
|
|
query_start_used= query_start_sec_part_used= 0;
|
|
count_cuted_fields= CHECK_FIELD_IGNORE;
|
|
killed= NOT_KILLED;
|
|
col_access=0;
|
|
is_slave_error= thread_specific_used= FALSE;
|
|
hash_clear(&handler_tables_hash);
|
|
tmp_table=0;
|
|
used_tables=0;
|
|
cuted_fields= sent_row_count= row_count= 0L;
|
|
limit_found_rows= 0;
|
|
row_count_func= -1;
|
|
statement_id_counter= 0UL;
|
|
#ifdef ERROR_INJECT_SUPPORT
|
|
error_inject_value= 0UL;
|
|
#endif
|
|
// Must be reset to handle error with THD's created for init of mysqld
|
|
lex->current_select= 0;
|
|
user_time.val= start_time= start_time_sec_part= 0;
|
|
start_utime= prior_thr_create_utime= 0L;
|
|
utime_after_lock= 0L;
|
|
progress.report_to_client= 0;
|
|
progress.max_counter= 0;
|
|
current_linfo = 0;
|
|
slave_thread = 0;
|
|
bzero(&variables, sizeof(variables));
|
|
thread_id= 0;
|
|
one_shot_set= 0;
|
|
file_id = 0;
|
|
query_id= 0;
|
|
query_name_consts= 0;
|
|
warn_id= 0;
|
|
db_charset= global_system_variables.collation_database;
|
|
bzero(ha_data, sizeof(ha_data));
|
|
mysys_var=0;
|
|
binlog_evt_union.do_union= FALSE;
|
|
enable_slow_log= 0;
|
|
|
|
#ifndef DBUG_OFF
|
|
dbug_sentry=THD_SENTRY_MAGIC;
|
|
#endif
|
|
#ifndef EMBEDDED_LIBRARY
|
|
net.vio=0;
|
|
#endif
|
|
client_capabilities= 0; // minimalistic client
|
|
#ifdef HAVE_QUERY_CACHE
|
|
query_cache_init_query(&net); // If error on boot
|
|
#endif
|
|
ull=0;
|
|
system_thread= NON_SYSTEM_THREAD;
|
|
cleanup_done= abort_on_warning= no_warnings_for_error= 0;
|
|
peer_port= 0; // For SHOW PROCESSLIST
|
|
transaction.m_pending_rows_event= 0;
|
|
transaction.on= 1;
|
|
wt_thd_lazy_init(&transaction.wt, &variables.wt_deadlock_search_depth_short,
|
|
&variables.wt_timeout_short,
|
|
&variables.wt_deadlock_search_depth_long,
|
|
&variables.wt_timeout_long);
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
|
|
active_vio = 0;
|
|
#endif
|
|
pthread_mutex_init(&LOCK_thd_data, MY_MUTEX_INIT_FAST);
|
|
pthread_mutex_init(&LOCK_wakeup_ready, MY_MUTEX_INIT_FAST);
|
|
pthread_cond_init(&COND_wakeup_ready, 0);
|
|
|
|
/* Variables with default values */
|
|
proc_info="login";
|
|
where= THD::DEFAULT_WHERE;
|
|
server_id = ::server_id;
|
|
slave_net = 0;
|
|
command=COM_CONNECT;
|
|
*scramble= '\0';
|
|
|
|
init();
|
|
/* Initialize sub structures */
|
|
init_sql_alloc(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE);
|
|
#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
|
|
profiling.set_thd(this);
|
|
#endif
|
|
user_connect=(USER_CONN *)0;
|
|
hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
|
|
(hash_get_key) get_var_key,
|
|
(hash_free_key) free_user_var, 0);
|
|
|
|
sp_proc_cache= NULL;
|
|
sp_func_cache= NULL;
|
|
|
|
/* For user vars replication*/
|
|
if (opt_bin_log)
|
|
my_init_dynamic_array(&user_var_events,
|
|
sizeof(BINLOG_USER_VAR_EVENT *), 16, 16);
|
|
else
|
|
bzero((char*) &user_var_events, sizeof(user_var_events));
|
|
|
|
/* Protocol */
|
|
protocol= &protocol_text; // Default protocol
|
|
protocol_text.init(this);
|
|
protocol_binary.init(this);
|
|
|
|
tablespace_op=FALSE;
|
|
tmp= sql_rnd_with_mutex();
|
|
my_rnd_init(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id);
|
|
substitute_null_with_insert_id = FALSE;
|
|
thr_lock_info_init(&lock_info); /* safety: will be reset after start */
|
|
thr_lock_owner_init(&main_lock_id, &lock_info);
|
|
|
|
m_internal_handler= NULL;
|
|
arena_for_cached_items= 0;
|
|
m_binlog_invoker= FALSE;
|
|
memset(&invoker_user, 0, sizeof(invoker_user));
|
|
memset(&invoker_host, 0, sizeof(invoker_host));
|
|
prepare_derived_at_open= FALSE;
|
|
create_tmp_table_for_derived= FALSE;
|
|
save_prep_leaf_list= FALSE;
|
|
}
|
|
|
|
|
|
void THD::push_internal_handler(Internal_error_handler *handler)
|
|
{
|
|
DBUG_ENTER("THD::push_internal_handler");
|
|
if (m_internal_handler)
|
|
{
|
|
handler->m_prev_internal_handler= m_internal_handler;
|
|
m_internal_handler= handler;
|
|
}
|
|
else
|
|
{
|
|
m_internal_handler= handler;
|
|
}
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
bool THD::handle_error(uint sql_errno, const char *message,
|
|
MYSQL_ERROR::enum_warning_level level)
|
|
{
|
|
for (Internal_error_handler *error_handler= m_internal_handler;
|
|
error_handler;
|
|
error_handler= error_handler->m_prev_internal_handler)
|
|
{
|
|
if (error_handler->handle_error(sql_errno, message, level, this))
|
|
return TRUE;
|
|
}
|
|
return FALSE;
|
|
}
|
|
|
|
|
|
Internal_error_handler *THD::pop_internal_handler()
|
|
{
|
|
DBUG_ENTER("THD::pop_internal_handler");
|
|
DBUG_ASSERT(m_internal_handler != NULL);
|
|
Internal_error_handler *popped_handler= m_internal_handler;
|
|
m_internal_handler= m_internal_handler->m_prev_internal_handler;
|
|
DBUG_RETURN(popped_handler);
|
|
}
|
|
|
|
extern "C"
|
|
void *thd_alloc(MYSQL_THD thd, unsigned int size)
|
|
{
|
|
return thd->alloc(size);
|
|
}
|
|
|
|
extern "C"
|
|
void *thd_calloc(MYSQL_THD thd, unsigned int size)
|
|
{
|
|
return thd->calloc(size);
|
|
}
|
|
|
|
extern "C"
|
|
char *thd_strdup(MYSQL_THD thd, const char *str)
|
|
{
|
|
return thd->strdup(str);
|
|
}
|
|
|
|
extern "C"
|
|
char *thd_strmake(MYSQL_THD thd, const char *str, unsigned int size)
|
|
{
|
|
return thd->strmake(str, size);
|
|
}
|
|
|
|
extern "C"
|
|
LEX_STRING *thd_make_lex_string(THD *thd, LEX_STRING *lex_str,
|
|
const char *str, unsigned int size,
|
|
int allocate_lex_string)
|
|
{
|
|
return thd->make_lex_string(lex_str, str, size,
|
|
(bool) allocate_lex_string);
|
|
}
|
|
|
|
extern "C"
|
|
void *thd_memdup(MYSQL_THD thd, const void* str, unsigned int size)
|
|
{
|
|
return thd->memdup(str, size);
|
|
}
|
|
|
|
extern "C"
|
|
void thd_get_xid(const MYSQL_THD thd, MYSQL_XID *xid)
|
|
{
|
|
*xid = *(MYSQL_XID *) &thd->transaction.xid_state.xid;
|
|
}
|
|
|
|
#ifdef _WIN32
|
|
extern "C" THD *_current_thd_noinline(void)
|
|
{
|
|
return my_pthread_getspecific_ptr(THD*,THR_THD);
|
|
}
|
|
#endif
|
|
/*
|
|
Init common variables that have to be reset on start and on change_user
|
|
*/
|
|
|
|
void THD::init(void)
|
|
{
|
|
pthread_mutex_lock(&LOCK_global_system_variables);
|
|
plugin_thdvar_init(this);
|
|
variables.time_format= date_time_format_copy((THD*) 0,
|
|
variables.time_format);
|
|
variables.date_format= date_time_format_copy((THD*) 0,
|
|
variables.date_format);
|
|
variables.datetime_format= date_time_format_copy((THD*) 0,
|
|
variables.datetime_format);
|
|
/*
|
|
variables= global_system_variables above has reset
|
|
variables.pseudo_thread_id to 0. We need to correct it here to
|
|
avoid temporary tables replication failure.
|
|
*/
|
|
variables.pseudo_thread_id= thread_id;
|
|
pthread_mutex_unlock(&LOCK_global_system_variables);
|
|
server_status= SERVER_STATUS_AUTOCOMMIT;
|
|
if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
|
|
server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES;
|
|
options= thd_startup_options;
|
|
|
|
if (variables.max_join_size == HA_POS_ERROR)
|
|
options |= OPTION_BIG_SELECTS;
|
|
else
|
|
options &= ~OPTION_BIG_SELECTS;
|
|
|
|
transaction.all.modified_non_trans_table= transaction.stmt.modified_non_trans_table= FALSE;
|
|
open_options=ha_open_options;
|
|
update_lock_default= (variables.low_priority_updates ?
|
|
TL_WRITE_LOW_PRIORITY :
|
|
TL_WRITE);
|
|
session_tx_isolation= (enum_tx_isolation) variables.tx_isolation;
|
|
warn_list.empty();
|
|
bzero((char*) warn_count, sizeof(warn_count));
|
|
total_warn_count= 0;
|
|
update_charset();
|
|
reset_current_stmt_binlog_row_based();
|
|
bzero((char *) &status_var, sizeof(status_var));
|
|
bzero((char *) &org_status_var, sizeof(org_status_var));
|
|
sql_log_bin_toplevel= options & OPTION_BIN_LOG;
|
|
select_commands= update_commands= other_commands= 0;
|
|
/* Set to handle counting of aborted connections */
|
|
userstat_running= opt_userstat_running;
|
|
last_global_update_time= current_connect_time= time(NULL);
|
|
#if defined(ENABLED_DEBUG_SYNC)
|
|
/* Initialize the Debug Sync Facility. See debug_sync.cc. */
|
|
debug_sync_init_thread(this);
|
|
#endif /* defined(ENABLED_DEBUG_SYNC) */
|
|
}
|
|
|
|
|
|
/* Updates some status variables to be used by update_global_user_stats */
|
|
|
|
void THD::update_stats(void)
|
|
{
|
|
/* sql_command == SQLCOM_END in case of parse errors or quit */
|
|
if (lex->sql_command != SQLCOM_END)
|
|
{
|
|
/* A SQL query. */
|
|
if (lex->sql_command == SQLCOM_SELECT)
|
|
select_commands++;
|
|
else if (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND)
|
|
{
|
|
/* Ignore 'SHOW ' commands */
|
|
}
|
|
else if (is_update_query(lex->sql_command))
|
|
update_commands++;
|
|
else
|
|
other_commands++;
|
|
}
|
|
}
|
|
|
|
|
|
void THD::update_all_stats()
|
|
{
|
|
ulonglong end_cpu_time, end_utime;
|
|
double busy_time, cpu_time;
|
|
|
|
/* Reset status variables used by information_schema.processlist */
|
|
progress.max_counter= 0;
|
|
progress.max_stage= 0;
|
|
progress.report= 0;
|
|
|
|
/* This is set at start of query if opt_userstat_running was set */
|
|
if (!userstat_running)
|
|
return;
|
|
|
|
end_cpu_time= my_getcputime();
|
|
end_utime= microsecond_interval_timer();
|
|
busy_time= (end_utime - start_utime) / 1000000.0;
|
|
cpu_time= (end_cpu_time - start_cpu_time) / 10000000.0;
|
|
/* In case there are bad values, 2629743 is the #seconds in a month. */
|
|
if (cpu_time > 2629743.0)
|
|
cpu_time= 0;
|
|
status_var_add(status_var.cpu_time, cpu_time);
|
|
status_var_add(status_var.busy_time, busy_time);
|
|
|
|
update_global_user_stats(this, TRUE, my_time(0));
|
|
// Has to be updated after update_global_user_stats()
|
|
userstat_running= 0;
|
|
}
|
|
|
|
|
|
/*
|
|
Init THD for query processing.
|
|
This has to be called once before we call mysql_parse.
|
|
See also comments in sql_class.h.
|
|
*/
|
|
|
|
void THD::init_for_queries()
|
|
{
|
|
set_time();
|
|
ha_enable_transaction(this,TRUE);
|
|
|
|
reset_root_defaults(mem_root, variables.query_alloc_block_size,
|
|
variables.query_prealloc_size);
|
|
#ifdef USING_TRANSACTIONS
|
|
reset_root_defaults(&transaction.mem_root,
|
|
variables.trans_alloc_block_size,
|
|
variables.trans_prealloc_size);
|
|
#endif
|
|
transaction.xid_state.xid.null();
|
|
transaction.xid_state.in_thd=1;
|
|
}
|
|
|
|
|
|
/*
|
|
Do what's needed when one invokes change user
|
|
|
|
SYNOPSIS
|
|
change_user()
|
|
|
|
IMPLEMENTATION
|
|
Reset all resources that are connection specific
|
|
*/
|
|
|
|
|
|
void THD::change_user(void)
|
|
{
|
|
pthread_mutex_lock(&LOCK_status);
|
|
add_to_status(&global_status_var, &status_var);
|
|
pthread_mutex_unlock(&LOCK_status);
|
|
|
|
cleanup();
|
|
killed= NOT_KILLED;
|
|
cleanup_done= 0;
|
|
init();
|
|
stmt_map.reset();
|
|
hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
|
|
(hash_get_key) get_var_key,
|
|
(hash_free_key) free_user_var, 0);
|
|
sp_cache_clear(&sp_proc_cache);
|
|
sp_cache_clear(&sp_func_cache);
|
|
}
|
|
|
|
|
|
/* Do operations that may take a long time */
|
|
|
|
void THD::cleanup(void)
|
|
{
|
|
DBUG_ENTER("THD::cleanup");
|
|
DBUG_ASSERT(cleanup_done == 0);
|
|
|
|
killed= KILL_CONNECTION;
|
|
#ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE
|
|
if (transaction.xid_state.xa_state == XA_PREPARED)
|
|
{
|
|
#error xid_state in the cache should be replaced by the allocated value
|
|
}
|
|
#endif
|
|
{
|
|
ha_rollback(this);
|
|
xid_cache_delete(&transaction.xid_state);
|
|
}
|
|
if (locked_tables)
|
|
{
|
|
lock=locked_tables; locked_tables=0;
|
|
close_thread_tables(this);
|
|
}
|
|
if (user_connect)
|
|
{
|
|
decrease_user_connections(user_connect);
|
|
user_connect= 0; // Safety
|
|
}
|
|
wt_thd_destroy(&transaction.wt);
|
|
|
|
#if defined(ENABLED_DEBUG_SYNC)
|
|
/* End the Debug Sync Facility. See debug_sync.cc. */
|
|
debug_sync_end_thread(this);
|
|
#endif /* defined(ENABLED_DEBUG_SYNC) */
|
|
|
|
mysql_ha_cleanup(this);
|
|
delete_dynamic(&user_var_events);
|
|
hash_free(&user_vars);
|
|
close_temporary_tables(this);
|
|
my_free((char*) variables.time_format, MYF(MY_ALLOW_ZERO_PTR));
|
|
my_free((char*) variables.date_format, MYF(MY_ALLOW_ZERO_PTR));
|
|
my_free((char*) variables.datetime_format, MYF(MY_ALLOW_ZERO_PTR));
|
|
|
|
sp_cache_clear(&sp_proc_cache);
|
|
sp_cache_clear(&sp_func_cache);
|
|
|
|
if (global_read_lock)
|
|
unlock_global_read_lock(this);
|
|
if (ull)
|
|
{
|
|
pthread_mutex_lock(&LOCK_user_locks);
|
|
item_user_lock_release(ull);
|
|
pthread_mutex_unlock(&LOCK_user_locks);
|
|
ull= NULL;
|
|
}
|
|
|
|
cleanup_done=1;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
THD::~THD()
|
|
{
|
|
THD_CHECK_SENTRY(this);
|
|
DBUG_ENTER("~THD()");
|
|
/* Ensure that no one is using THD */
|
|
pthread_mutex_lock(&LOCK_thd_data);
|
|
pthread_mutex_unlock(&LOCK_thd_data);
|
|
add_to_status(&global_status_var, &status_var);
|
|
|
|
/* Close connection */
|
|
#ifndef EMBEDDED_LIBRARY
|
|
if (net.vio)
|
|
{
|
|
vio_delete(net.vio);
|
|
net_end(&net);
|
|
}
|
|
#endif
|
|
stmt_map.reset(); /* close all prepared statements */
|
|
DBUG_ASSERT(lock_info.n_cursors == 0);
|
|
|
|
if (!cleanup_done)
|
|
cleanup();
|
|
|
|
ha_close_connection(this);
|
|
plugin_thdvar_cleanup(this);
|
|
|
|
DBUG_PRINT("info", ("freeing security context"));
|
|
main_security_ctx.destroy();
|
|
safeFree(db);
|
|
free_root(&warn_root,MYF(0));
|
|
#ifdef USING_TRANSACTIONS
|
|
free_root(&transaction.mem_root,MYF(0));
|
|
#endif
|
|
mysys_var=0; // Safety (shouldn't be needed)
|
|
pthread_cond_destroy(&COND_wakeup_ready);
|
|
pthread_mutex_destroy(&LOCK_wakeup_ready);
|
|
pthread_mutex_destroy(&LOCK_thd_data);
|
|
#ifndef DBUG_OFF
|
|
dbug_sentry= THD_SENTRY_GONE;
|
|
#endif
|
|
#ifndef EMBEDDED_LIBRARY
|
|
if (rli_fake)
|
|
{
|
|
delete rli_fake;
|
|
rli_fake= NULL;
|
|
}
|
|
#endif
|
|
|
|
free_root(&main_mem_root, MYF(0));
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/*
|
|
Add all status variables to another status variable array
|
|
|
|
SYNOPSIS
|
|
add_to_status()
|
|
to_var add to this array
|
|
from_var from this array
|
|
|
|
NOTES
|
|
This function assumes that all variables at start are long/ulong and
|
|
other types are handled explicitly
|
|
*/
|
|
|
|
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
|
|
{
|
|
ulong *end= (ulong*) ((uchar*) to_var +
|
|
offsetof(STATUS_VAR, last_system_status_var) +
|
|
sizeof(ulong));
|
|
ulong *to= (ulong*) to_var, *from= (ulong*) from_var;
|
|
|
|
while (to != end)
|
|
*(to++)+= *(from++);
|
|
|
|
/* Handle the not ulong variables. See end of system_status_var */
|
|
to_var->bytes_received+= from_var->bytes_received;
|
|
to_var->bytes_sent+= from_var->bytes_sent;
|
|
to_var->rows_read+= from_var->rows_read;
|
|
to_var->rows_sent+= from_var->rows_sent;
|
|
to_var->rows_tmp_read+= from_var->rows_tmp_read;
|
|
to_var->binlog_bytes_written+= from_var->binlog_bytes_written;
|
|
to_var->cpu_time+= from_var->cpu_time;
|
|
to_var->busy_time+= from_var->busy_time;
|
|
}
|
|
|
|
/*
|
|
Add the difference between two status variable arrays to another one.
|
|
|
|
SYNOPSIS
|
|
add_diff_to_status
|
|
to_var add to this array
|
|
from_var from this array
|
|
dec_var minus this array
|
|
|
|
NOTE
|
|
This function assumes that all variables at start are long/ulong and
|
|
other types are handled explicitly
|
|
*/
|
|
|
|
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
|
|
STATUS_VAR *dec_var)
|
|
{
|
|
ulong *end= (ulong*) ((uchar*) to_var + offsetof(STATUS_VAR,
|
|
last_system_status_var) +
|
|
sizeof(ulong));
|
|
ulong *to= (ulong*) to_var, *from= (ulong*) from_var, *dec= (ulong*) dec_var;
|
|
|
|
while (to != end)
|
|
*(to++)+= *(from++) - *(dec++);
|
|
|
|
to_var->bytes_received+= from_var->bytes_received -
|
|
dec_var->bytes_received;
|
|
to_var->bytes_sent+= from_var->bytes_sent - dec_var->bytes_sent;
|
|
to_var->rows_read+= from_var->rows_read - dec_var->rows_read;
|
|
to_var->rows_sent+= from_var->rows_sent - dec_var->rows_sent;
|
|
to_var->rows_tmp_read+= from_var->rows_tmp_read - dec_var->rows_tmp_read;
|
|
to_var->binlog_bytes_written+= from_var->binlog_bytes_written -
|
|
dec_var->binlog_bytes_written;
|
|
to_var->cpu_time+= from_var->cpu_time - dec_var->cpu_time;
|
|
to_var->busy_time+= from_var->busy_time - dec_var->busy_time;
|
|
}
|
|
|
|
#define SECONDS_TO_WAIT_FOR_KILL 2
|
|
#if !defined(__WIN__) && defined(HAVE_SELECT)
|
|
/* my_sleep() can wait for sub second times */
|
|
#define WAIT_FOR_KILL_TRY_TIMES 20
|
|
#else
|
|
#define WAIT_FOR_KILL_TRY_TIMES 2
|
|
#endif
|
|
|
|
|
|
void THD::awake(killed_state state_to_set)
|
|
{
|
|
DBUG_ENTER("THD::awake");
|
|
DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
|
|
THD_CHECK_SENTRY(this);
|
|
safe_mutex_assert_owner(&LOCK_thd_data);
|
|
|
|
if (global_system_variables.log_warnings > 3)
|
|
{
|
|
Security_context *sctx= security_ctx;
|
|
sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION),
|
|
thread_id,(db ? db : "unconnected"),
|
|
sctx->user ? sctx->user : "unauthenticated",
|
|
sctx->host_or_ip,
|
|
"KILLED");
|
|
}
|
|
killed= state_to_set;
|
|
if (state_to_set >= KILL_CONNECTION)
|
|
{
|
|
thr_alarm_kill(thread_id);
|
|
if (!slave_thread)
|
|
thread_scheduler.post_kill_notification(this);
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
|
|
if (this != current_thd)
|
|
{
|
|
/*
|
|
In addition to a signal, let's close the socket of the thread that
|
|
is being killed. This is to make sure it does not block if the
|
|
signal is lost. This needs to be done only on platforms where
|
|
signals are not a reliable interruption mechanism.
|
|
|
|
If we're killing ourselves, we know that we're not blocked, so this
|
|
hack is not used.
|
|
*/
|
|
|
|
close_active_vio();
|
|
}
|
|
#endif
|
|
}
|
|
if (mysys_var)
|
|
{
|
|
pthread_mutex_lock(&mysys_var->mutex);
|
|
if (!system_thread) // Don't abort locks
|
|
mysys_var->abort=1;
|
|
/*
|
|
This broadcast could be up in the air if the victim thread
|
|
exits the cond in the time between read and broadcast, but that is
|
|
ok since all we want to do is to make the victim thread get out
|
|
of waiting on current_cond.
|
|
If we see a non-zero current_cond: it cannot be an old value (because
|
|
then exit_cond() should have run and it can't because we have mutex); so
|
|
it is the true value but maybe current_mutex is not yet non-zero (we're
|
|
in the middle of enter_cond() and there is a "memory order
|
|
inversion"). So we test the mutex too to not lock 0.
|
|
|
|
Note that there is a small chance we fail to kill. If victim has locked
|
|
current_mutex, but hasn't yet entered enter_cond() (which means that
|
|
current_cond and current_mutex are 0), then the victim will not get
|
|
a signal and it may wait "forever" on the cond (until
|
|
we issue a second KILL or the status it's waiting for happens).
|
|
It's true that we have set its thd->killed but it may not
|
|
see it immediately and so may have time to reach the cond_wait().
|
|
|
|
We have to do the loop with trylock, because if we would use
|
|
pthread_mutex_lock(), we can cause a deadlock as we are here locking
|
|
the mysys_var->mutex and mysys_var->current_mutex in a different order
|
|
than in the thread we are trying to kill.
|
|
We only sleep for 2 seconds as we don't want to have LOCK_thd_data
|
|
locked too long time.
|
|
|
|
There is a small chance we may not succeed in aborting a thread that
|
|
is not yet waiting for a mutex, but as this happens only for a
|
|
thread that was doing something else when the kill was issued and
|
|
which should detect the kill flag before it starts to wait, this
|
|
should be good enough.
|
|
*/
|
|
if (mysys_var->current_cond && mysys_var->current_mutex)
|
|
{
|
|
uint i;
|
|
for (i= 0; i < WAIT_FOR_KILL_TRY_TIMES * SECONDS_TO_WAIT_FOR_KILL; i++)
|
|
{
|
|
int ret= pthread_mutex_trylock(mysys_var->current_mutex);
|
|
pthread_cond_broadcast(mysys_var->current_cond);
|
|
if (!ret)
|
|
{
|
|
/* Signal is sure to get through */
|
|
pthread_mutex_unlock(mysys_var->current_mutex);
|
|
break;
|
|
}
|
|
}
|
|
my_sleep(1000000L / WAIT_FOR_KILL_TRY_TIMES);
|
|
}
|
|
pthread_mutex_unlock(&mysys_var->mutex);
|
|
}
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/*
  Get error number for killed state
  Note that the error message can't have any parameters.
  See thd::kill_message()
*/

int killed_errno(killed_state killed)
{
  switch (killed) {
  case NOT_KILLED:
  case KILL_HARD_BIT:
    return 0;                                   // Probably wrong usage
  case KILL_BAD_DATA:
  case KILL_BAD_DATA_HARD:
    return 0;                                   // Not a real error
  case KILL_CONNECTION:
  case KILL_CONNECTION_HARD:
  case KILL_SYSTEM_THREAD:
  case KILL_SYSTEM_THREAD_HARD:
    return ER_CONNECTION_KILLED;
  case KILL_QUERY:
  case KILL_QUERY_HARD:
    return ER_QUERY_INTERRUPTED;
  case KILL_SERVER:
  case KILL_SERVER_HARD:
    return ER_SERVER_SHUTDOWN;
  }
  return 0;                                     // Keep compiler happy
}

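/*
  Because the killed_state values are numbered in order of increasing
  severity, callers can use simple range checks instead of listing every
  state; see for example "if (state_to_set >= KILL_CONNECTION)" in
  THD::awake() above.
*/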
/*
|
|
Remember the location of thread info, the structure needed for
|
|
sql_alloc() and the structure for the net buffer
|
|
*/
|
|
|
|
bool THD::store_globals()
|
|
{
|
|
/*
|
|
Assert that thread_stack is initialized: it's necessary to be able
|
|
to track stack overrun.
|
|
*/
|
|
DBUG_ASSERT(thread_stack);
|
|
|
|
if (my_pthread_setspecific_ptr(THR_THD, this) ||
|
|
my_pthread_setspecific_ptr(THR_MALLOC, &mem_root))
|
|
return 1;
|
|
mysys_var=my_thread_var;
|
|
/*
|
|
Let mysqld define the thread id (not mysys)
|
|
This allows us to move THD to different threads if needed.
|
|
*/
|
|
mysys_var->id= thread_id;
|
|
real_id= pthread_self(); // For debugging
|
|
mysys_var->stack_ends_here= thread_stack + // for consistency, see libevent_thread_proc
|
|
STACK_DIRECTION * (long)my_thread_stack_size;
|
|
|
|
/*
|
|
We have to call thr_lock_info_init() again here as THD may have been
|
|
created in another thread
|
|
*/
|
|
thr_lock_info_init(&lock_info);
|
|
|
|
#ifdef SAFE_MUTEX
|
|
/* Register order of mutex for wrong mutex deadlock detector */
|
|
pthread_mutex_lock(&LOCK_thd_data);
|
|
pthread_mutex_lock(&mysys_var->mutex);
|
|
|
|
pthread_mutex_unlock(&mysys_var->mutex);
|
|
pthread_mutex_unlock(&LOCK_thd_data);
|
|
#endif
|
|
return 0;
|
|
}
|
|
|
|
|
|
/**
|
|
Untie THD from current thread
|
|
|
|
Used when using --thread-handling=pool-of-threads
|
|
*/
|
|
|
|
void THD::reset_globals()
|
|
{
|
|
pthread_mutex_lock(&LOCK_thd_data);
|
|
mysys_var= 0;
|
|
pthread_mutex_unlock(&LOCK_thd_data);
|
|
}
|
|
|
|
/*
|
|
Cleanup after query.
|
|
|
|
SYNOPSIS
|
|
THD::cleanup_after_query()
|
|
|
|
DESCRIPTION
|
|
This function is used to reset thread data to its default state.
|
|
|
|
NOTE
|
|
This function is not suitable for setting thread data to some
|
|
non-default values, as there is only one replication thread, so
|
|
different master threads may overwrite data of each other on
|
|
slave.
|
|
*/
|
|
|
|
void THD::cleanup_after_query()
|
|
{
|
|
/*
|
|
Reset rand_used so that detection of calls to rand() will save random
|
|
seeds if needed by the slave.
|
|
|
|
Do not reset rand_used if inside a stored function or trigger because
|
|
only the call to these operations is logged. Thus only the calling
|
|
statement needs to detect rand() calls made by its substatements. These
|
|
substatements must not set rand_used to 0 because it would remove the
|
|
detection of rand() by the calling statement.
|
|
*/
|
|
if (!in_sub_stmt) /* stored functions and triggers are a special case */
|
|
{
|
|
/* Forget those values, for next binlogger: */
|
|
stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
|
|
auto_inc_intervals_in_cur_stmt_for_binlog.empty();
|
|
rand_used= 0;
|
|
}
|
|
if (first_successful_insert_id_in_cur_stmt > 0)
|
|
{
|
|
/* set what LAST_INSERT_ID() will return */
|
|
first_successful_insert_id_in_prev_stmt=
|
|
first_successful_insert_id_in_cur_stmt;
|
|
first_successful_insert_id_in_cur_stmt= 0;
|
|
substitute_null_with_insert_id= TRUE;
|
|
}
|
|
arg_of_last_insert_id_function= 0;
|
|
/* Free Items that were created during this execution */
|
|
free_items();
|
|
/* Reset where. */
|
|
where= THD::DEFAULT_WHERE;
|
|
/* reset table map for multi-table update */
|
|
table_map_for_update= 0;
|
|
m_binlog_invoker= FALSE;
|
|
}
|
|
|
|
|
|
/**
|
|
Create a LEX_STRING in this connection.
|
|
|
|
@param lex_str pointer to LEX_STRING object to be initialized
|
|
@param str initializer to be copied into lex_str
|
|
@param length length of str, in bytes
|
|
@param allocate_lex_string if TRUE, allocate new LEX_STRING object,
|
|
instead of using lex_str value
|
|
@return NULL on failure, or pointer to the LEX_STRING object
|
|
*/
|
|
LEX_STRING *THD::make_lex_string(LEX_STRING *lex_str,
|
|
const char* str, uint length,
|
|
bool allocate_lex_string)
|
|
{
|
|
if (allocate_lex_string)
|
|
if (!(lex_str= (LEX_STRING *)alloc(sizeof(LEX_STRING))))
|
|
return 0;
|
|
if (!(lex_str->str= strmake_root(mem_root, str, length)))
|
|
return 0;
|
|
lex_str->length= length;
|
|
return lex_str;
|
|
}
|
|
|
|
|
|
/*
|
|
Convert a string to another character set
|
|
|
|
SYNOPSIS
|
|
convert_string()
|
|
to Store new allocated string here
|
|
to_cs New character set for allocated string
|
|
from String to convert
|
|
from_length Length of string to convert
|
|
from_cs Original character set
|
|
|
|
NOTES
|
|
to will be 0-terminated to make it easy to pass to system funcs
|
|
|
|
RETURN
|
|
0 ok
|
|
1 End of memory.
|
|
In this case to->str will point to 0 and to->length will be 0.
|
|
*/
|
|
|
|
bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
|
|
const char *from, uint from_length,
|
|
CHARSET_INFO *from_cs)
|
|
{
|
|
DBUG_ENTER("convert_string");
|
|
size_t new_length= to_cs->mbmaxlen * from_length;
|
|
uint dummy_errors;
|
|
if (!(to->str= (char*) alloc(new_length+1)))
|
|
{
|
|
to->length= 0; // Safety fix
|
|
DBUG_RETURN(1); // EOM
|
|
}
|
|
to->length= copy_and_convert((char*) to->str, new_length, to_cs,
|
|
from, from_length, from_cs, &dummy_errors);
|
|
to->str[to->length]=0; // Safety
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
|
|
/*
|
|
Convert string from source character set to target character set inplace.
|
|
|
|
SYNOPSIS
|
|
THD::convert_string
|
|
|
|
DESCRIPTION
|
|
Convert string using convert_buffer - buffer for character set
|
|
conversion shared between all protocols.
|
|
|
|
RETURN
|
|
0 ok
|
|
!0 out of memory
|
|
*/
|
|
|
|
bool THD::convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs)
|
|
{
|
|
uint dummy_errors;
|
|
if (convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs, &dummy_errors))
|
|
return TRUE;
|
|
/* If convert_buffer >> s copying is more efficient long term */
|
|
if (convert_buffer.alloced_length() >= convert_buffer.length() * 2 ||
|
|
!s->is_alloced())
|
|
{
|
|
return s->copy(convert_buffer);
|
|
}
|
|
s->swap(convert_buffer);
|
|
return FALSE;
|
|
}
|
|
|
|
|
|
/*
|
|
Update some cache variables when character set changes
|
|
*/
|
|
|
|
void THD::update_charset()
|
|
{
|
|
uint32 not_used;
|
|
charset_is_system_charset= !String::needs_conversion(0,charset(),
|
|
system_charset_info,
|
|
¬_used);
|
|
charset_is_collation_connection=
|
|
!String::needs_conversion(0,charset(),variables.collation_connection,
|
|
¬_used);
|
|
charset_is_character_set_filesystem=
|
|
!String::needs_conversion(0, charset(),
|
|
variables.character_set_filesystem, ¬_used);
|
|
}
|
|
|
|
|
|
/* Routines for adding tables to the list of tables changed in a transaction */
|
|
|
|
inline static void list_include(CHANGED_TABLE_LIST** prev,
|
|
CHANGED_TABLE_LIST* curr,
|
|
CHANGED_TABLE_LIST* new_table)
|
|
{
|
|
if (new_table)
|
|
{
|
|
*prev = new_table;
|
|
(*prev)->next = curr;
|
|
}
|
|
}
|
|
|
|
/* add table to list of changed in transaction tables */
|
|
|
|
void THD::add_changed_table(TABLE *table)
|
|
{
|
|
DBUG_ENTER("THD::add_changed_table(table)");
|
|
|
|
DBUG_ASSERT((options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
|
|
table->file->has_transactions());
|
|
add_changed_table(table->s->table_cache_key.str,
|
|
(long) table->s->table_cache_key.length);
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void THD::add_changed_table(const char *key, long key_length)
|
|
{
|
|
DBUG_ENTER("THD::add_changed_table(key)");
|
|
CHANGED_TABLE_LIST **prev_changed = &transaction.changed_tables;
|
|
CHANGED_TABLE_LIST *curr = transaction.changed_tables;
|
|
|
|
for (; curr; prev_changed = &(curr->next), curr = curr->next)
|
|
{
|
|
int cmp = (long)curr->key_length - (long)key_length;
|
|
if (cmp < 0)
|
|
{
|
|
list_include(prev_changed, curr, changed_table_dup(key, key_length));
|
|
DBUG_PRINT("info",
|
|
("key_length: %ld %u", key_length,
|
|
(*prev_changed)->key_length));
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
else if (cmp == 0)
|
|
{
|
|
cmp = memcmp(curr->key, key, curr->key_length);
|
|
if (cmp < 0)
|
|
{
|
|
list_include(prev_changed, curr, changed_table_dup(key, key_length));
|
|
DBUG_PRINT("info",
|
|
("key_length: %ld %u", key_length,
|
|
(*prev_changed)->key_length));
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
else if (cmp == 0)
|
|
{
|
|
DBUG_PRINT("info", ("already in list"));
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
}
|
|
}
|
|
*prev_changed = changed_table_dup(key, key_length);
|
|
DBUG_PRINT("info", ("key_length: %ld %u", key_length,
|
|
(*prev_changed)->key_length));
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length)
|
|
{
|
|
CHANGED_TABLE_LIST* new_table =
|
|
(CHANGED_TABLE_LIST*) trans_alloc(ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST))+
|
|
key_length + 1);
|
|
if (!new_table)
|
|
{
|
|
my_error(EE_OUTOFMEMORY, MYF(ME_BELL),
|
|
ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1);
|
|
killed= KILL_CONNECTION;
|
|
return 0;
|
|
}
|
|
|
|
new_table->key= ((char*)new_table)+ ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST));
|
|
new_table->next = 0;
|
|
new_table->key_length = key_length;
|
|
::memcpy(new_table->key, key, key_length);
|
|
return new_table;
|
|
}
|
|
|
|
|
|
int THD::send_explain_fields(select_result *result)
|
|
{
|
|
List<Item> field_list;
|
|
Item *item;
|
|
CHARSET_INFO *cs= system_charset_info;
|
|
field_list.push_back(new Item_return_int("id",3, MYSQL_TYPE_LONGLONG));
|
|
field_list.push_back(new Item_empty_string("select_type", 19, cs));
|
|
field_list.push_back(item= new Item_empty_string("table", NAME_CHAR_LEN, cs));
|
|
item->maybe_null= 1;
|
|
if (lex->describe & DESCRIBE_PARTITIONS)
|
|
{
|
|
/* Maximum length of string that make_used_partitions_str() can produce */
|
|
item= new Item_empty_string("partitions", MAX_PARTITIONS * (1 + FN_LEN),
|
|
cs);
|
|
field_list.push_back(item);
|
|
item->maybe_null= 1;
|
|
}
|
|
field_list.push_back(item= new Item_empty_string("type", 10, cs));
|
|
item->maybe_null= 1;
|
|
field_list.push_back(item=new Item_empty_string("possible_keys",
|
|
NAME_CHAR_LEN*MAX_KEY, cs));
|
|
item->maybe_null=1;
|
|
field_list.push_back(item=new Item_empty_string("key", NAME_CHAR_LEN, cs));
|
|
item->maybe_null=1;
|
|
field_list.push_back(item=new Item_empty_string("key_len",
|
|
NAME_CHAR_LEN*MAX_KEY));
|
|
item->maybe_null=1;
|
|
field_list.push_back(item=new Item_empty_string("ref",
|
|
NAME_CHAR_LEN*MAX_REF_PARTS,
|
|
cs));
|
|
item->maybe_null=1;
|
|
field_list.push_back(item= new Item_return_int("rows", 10,
|
|
MYSQL_TYPE_LONGLONG));
|
|
if (lex->describe & DESCRIBE_EXTENDED)
|
|
{
|
|
field_list.push_back(item= new Item_float("filtered", 0.1234, 2, 4));
|
|
item->maybe_null=1;
|
|
}
|
|
item->maybe_null= 1;
|
|
field_list.push_back(new Item_empty_string("Extra", 255, cs));
|
|
return (result->send_fields(field_list,
|
|
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF));
|
|
}
|
|
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
|
|
void THD::close_active_vio()
|
|
{
|
|
DBUG_ENTER("close_active_vio");
|
|
safe_mutex_assert_owner(&LOCK_thd_data);
|
|
#ifndef EMBEDDED_LIBRARY
|
|
if (active_vio)
|
|
{
|
|
vio_close(active_vio);
|
|
active_vio = 0;
|
|
}
|
|
#endif
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
#endif
|
|
|
|
|
|
struct Item_change_record: public ilink
|
|
{
|
|
Item **place;
|
|
Item *old_value;
|
|
/* Placement new was hidden by `new' in ilink (TODO: check): */
|
|
static void *operator new(size_t size, void *mem) { return mem; }
|
|
static void operator delete(void *ptr, size_t size) {}
|
|
static void operator delete(void *ptr, void *mem) { /* never called */ }
|
|
};
|
|
|
|
|
|
/*
|
|
Register an item tree transformation, performed by the query
|
|
optimizer. We need a pointer to runtime_memroot because it may be !=
|
|
thd->mem_root (due to possible set_n_backup_active_arena called for thd).
|
|
*/
|
|
|
|
void THD::nocheck_register_item_tree_change(Item **place, Item *old_value,
|
|
MEM_ROOT *runtime_memroot)
|
|
{
|
|
Item_change_record *change;
|
|
/*
|
|
Now we use one node per change, which adds some memory overhead,
|
|
but still is rather fast as we use alloc_root for allocations.
|
|
A list of item tree changes of an average query should be short.
|
|
*/
|
|
void *change_mem= alloc_root(runtime_memroot, sizeof(*change));
|
|
if (change_mem == 0)
|
|
{
|
|
/*
|
|
OOM, thd->fatal_error() is called by the error handler of the
|
|
memroot. Just return.
|
|
*/
|
|
return;
|
|
}
|
|
change= new (change_mem) Item_change_record;
|
|
change->place= place;
|
|
change->old_value= old_value;
|
|
change_list.append(change);
|
|
}
|
|
|
|
/**
|
|
Check and register item change if needed
|
|
|
|
@param place place where we should assign new value
|
|
@param new_value place of the new value
|
|
|
|
@details
|
|
Let C be a reference to an item that changed the reference A
|
|
at the location (occurrence) L1 and this change has been registered.
|
|
If C is substituted for reference A at another location (occurrence) L2
|
|
that is to be registered as well, then this change has to be
|
|
consistent with the first change in order the procedure that rollback
|
|
changes to substitute the same reference at both locations L1 and L2.
|
|
*/
|
|
|
|
void THD::check_and_register_item_tree_change(Item **place, Item **new_value,
|
|
MEM_ROOT *runtime_memroot)
|
|
{
|
|
Item_change_record *change;
|
|
I_List_iterator<Item_change_record> it(change_list);
|
|
while ((change= it++))
|
|
{
|
|
if (change->place == new_value)
|
|
break; // we need only very first value
|
|
}
|
|
if (change)
|
|
nocheck_register_item_tree_change(place, change->old_value,
|
|
runtime_memroot);
|
|
}
|
|
|
|
|
|
void THD::rollback_item_tree_changes()
|
|
{
|
|
I_List_iterator<Item_change_record> it(change_list);
|
|
Item_change_record *change;
|
|
DBUG_ENTER("rollback_item_tree_changes");
|
|
|
|
while ((change= it++))
|
|
*change->place= change->old_value;
|
|
/* We can forget about changes memory: it's allocated in runtime memroot */
|
|
change_list.empty();
|
|
DBUG_VOID_RETURN;
|
|
}
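
/*
  Illustrative note (not part of the original sources): the functions above
  implement a small undo log for item tree rewrites done by the optimizer.
  A typical life cycle, assuming a prepared statement and using placeholder
  names (place, new_item, runtime_memroot), is roughly:

    Item *new_item= ...;                  // replacement built by the optimizer
    thd->check_and_register_item_tree_change(place, &new_item, runtime_memroot);
    *place= new_item;                     // substitute the item in the tree
    ...
    thd->rollback_item_tree_changes();    // statement cleanup restores *place

  The Item_change_record objects live in the runtime memroot, so emptying
  change_list is all the cleanup that is needed.
*/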
|
|
|
|
|
|
/*****************************************************************************
|
|
** Functions to provide a interface to select results
|
|
*****************************************************************************/
|
|
|
|
select_result::select_result()
|
|
{
|
|
thd=current_thd;
|
|
nest_level= (uint) -1;
|
|
}
|
|
|
|
void select_result::send_error(uint errcode,const char *err)
|
|
{
|
|
my_message(errcode, err, MYF(0));
|
|
}
|
|
|
|
|
|
void select_result::cleanup()
|
|
{
|
|
/* do nothing */
|
|
}
|
|
|
|
bool select_result::check_simple_select() const
|
|
{
|
|
my_error(ER_SP_BAD_CURSOR_QUERY, MYF(0));
|
|
return TRUE;
|
|
}
|
|
|
|
|
|
static String default_line_term("\n",default_charset_info);
|
|
static String default_escaped("\\",default_charset_info);
|
|
static String default_field_term("\t",default_charset_info);
|
|
|
|
sql_exchange::sql_exchange(char *name,bool flag)
|
|
:file_name(name), opt_enclosed(0), dumpfile(flag), skip_lines(0)
|
|
{
|
|
field_term= &default_field_term;
|
|
enclosed= line_start= &my_empty_string;
|
|
line_term= &default_line_term;
|
|
escaped= &default_escaped;
|
|
cs= NULL;
|
|
}
|
|
|
|
bool sql_exchange::escaped_given(void)
|
|
{
|
|
return escaped != &default_escaped;
|
|
}
|
|
|
|
|
|
bool select_send::send_fields(List<Item> &list, uint flags)
|
|
{
|
|
bool res;
|
|
if (!(res= thd->protocol->send_fields(&list, flags)))
|
|
is_result_set_started= 1;
|
|
return res;
|
|
}
|
|
|
|
void select_send::abort()
|
|
{
|
|
DBUG_ENTER("select_send::abort");
|
|
if (is_result_set_started && thd->spcont &&
|
|
thd->spcont->find_handler(thd, thd->main_da.sql_errno(),
|
|
MYSQL_ERROR::WARN_LEVEL_ERROR))
|
|
{
|
|
/*
|
|
We're executing a stored procedure, have an open result
|
|
set, an SQL exception condition and a handler for it.
|
|
In this situation we must abort the current statement,
|
|
silence the error and start executing the continue/exit
|
|
handler.
|
|
Before aborting the statement, let's end the open result set, as
|
|
otherwise the client will hang due to the violation of the
|
|
client/server protocol.
|
|
*/
|
|
thd->protocol->end_partial_result_set(thd);
|
|
}
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/**
|
|
Cleanup an instance of this class for re-use
|
|
at next execution of a prepared statement/
|
|
stored procedure statement.
|
|
*/
|
|
|
|
void select_send::cleanup()
|
|
{
|
|
is_result_set_started= FALSE;
|
|
}
|
|
|
|
/* Send data to client. Returns 0 if ok */
|
|
|
|
int select_send::send_data(List<Item> &items)
|
|
{
|
|
if (unit->offset_limit_cnt)
|
|
{ // using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
We may be passing the control from mysqld to the client: release the
|
|
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
|
|
by thd
|
|
*/
|
|
ha_release_temporary_latches(thd);
|
|
|
|
List_iterator_fast<Item> li(items);
|
|
Protocol *protocol= thd->protocol;
|
|
char buff[MAX_FIELD_WIDTH];
|
|
String buffer(buff, sizeof(buff), &my_charset_bin);
|
|
DBUG_ENTER("select_send::send_data");
|
|
|
|
protocol->prepare_for_resend();
|
|
Item *item;
|
|
while ((item=li++))
|
|
{
|
|
if (item->send(protocol, &buffer))
|
|
{
|
|
protocol->free(); // Free used buffer
|
|
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
|
|
break;
|
|
}
|
|
/*
|
|
Reset buffer to its original state, as it may have been altered in
|
|
Item::send().
|
|
*/
|
|
buffer.set(buff, sizeof(buff), &my_charset_bin);
|
|
}
|
|
thd->sent_row_count++;
|
|
if (thd->is_error())
|
|
{
|
|
protocol->remove_last_row();
|
|
DBUG_RETURN(1);
|
|
}
|
|
if (thd->vio_ok())
|
|
DBUG_RETURN(protocol->write());
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
bool select_send::send_eof()
|
|
{
|
|
/*
|
|
We may be passing the control from mysqld to the client: release the
|
|
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
|
|
by thd
|
|
*/
|
|
ha_release_temporary_latches(thd);
|
|
|
|
/* Unlock tables before sending packet to gain some speed */
|
|
if (thd->lock)
|
|
{
|
|
mysql_unlock_tables(thd, thd->lock);
|
|
thd->lock=0;
|
|
}
|
|
/*
|
|
Don't send EOF if we're in error condition (which implies we've already
|
|
sent or are sending an error)
|
|
*/
|
|
if (thd->is_error())
|
|
return TRUE;
|
|
::my_eof(thd);
|
|
is_result_set_started= 0;
|
|
return FALSE;
|
|
}
|
|
|
|
|
|
/************************************************************************
|
|
Handling writing to file
|
|
************************************************************************/
|
|
|
|
void select_to_file::send_error(uint errcode,const char *err)
|
|
{
|
|
my_message(errcode, err, MYF(0));
|
|
if (file > 0)
|
|
{
|
|
(void) end_io_cache(&cache);
|
|
(void) my_close(file,MYF(0));
|
|
(void) my_delete(path,MYF(0)); // Delete file on error
|
|
file= -1;
|
|
}
|
|
}
|
|
|
|
|
|
bool select_to_file::send_eof()
|
|
{
|
|
int error= test(end_io_cache(&cache));
|
|
if (my_close(file,MYF(MY_WME)))
|
|
error= 1;
|
|
if (!error)
|
|
{
|
|
/*
|
|
In order to remember the value of affected rows for ROW_COUNT()
|
|
      function, SELECT INTO has to have its own SQLCOM.
|
|
TODO: split from SQLCOM_SELECT
|
|
*/
|
|
::my_ok(thd,row_count);
|
|
}
|
|
file= -1;
|
|
return error;
|
|
}
|
|
|
|
|
|
void select_to_file::cleanup()
|
|
{
|
|
/* In case of error send_eof() may be not called: close the file here. */
|
|
if (file >= 0)
|
|
{
|
|
(void) end_io_cache(&cache);
|
|
(void) my_close(file,MYF(0));
|
|
file= -1;
|
|
}
|
|
path[0]= '\0';
|
|
row_count= 0;
|
|
}
|
|
|
|
|
|
select_to_file::~select_to_file()
|
|
{
|
|
if (file >= 0)
|
|
{ // This only happens in case of error
|
|
(void) end_io_cache(&cache);
|
|
(void) my_close(file,MYF(0));
|
|
file= -1;
|
|
}
|
|
}
|
|
|
|
/***************************************************************************
|
|
** Export of select to textfile
|
|
***************************************************************************/
|
|
|
|
select_export::~select_export()
|
|
{
|
|
thd->sent_row_count=row_count;
|
|
}
|
|
|
|
|
|
/*
  Create file with IO cache

  SYNOPSIS
    create_file()
    thd        Thread handle
    path       File name
    exchange   Exchange class
    cache      IO cache

  RETURN
    >= 0       File handle
    -1         Error
*/
|
|
|
|
|
|
static File create_file(THD *thd, char *path, sql_exchange *exchange,
|
|
IO_CACHE *cache)
|
|
{
|
|
File file;
|
|
uint option= MY_UNPACK_FILENAME | MY_RELATIVE_PATH;
|
|
|
|
#ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS
|
|
option|= MY_REPLACE_DIR; // Force use of db directory
|
|
#endif
|
|
|
|
if (!dirname_length(exchange->file_name))
|
|
{
|
|
strxnmov(path, FN_REFLEN-1, mysql_real_data_home, thd->db ? thd->db : "",
|
|
NullS);
|
|
(void) fn_format(path, exchange->file_name, path, "", option);
|
|
}
|
|
else
|
|
(void) fn_format(path, exchange->file_name, mysql_real_data_home, "", option);
|
|
|
|
if (!is_secure_file_path(path))
|
|
{
|
|
/* Write only allowed to dir or subdir specified by secure_file_priv */
|
|
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv");
|
|
return -1;
|
|
}
|
|
|
|
if (!access(path, F_OK))
|
|
{
|
|
my_error(ER_FILE_EXISTS_ERROR, MYF(0), exchange->file_name);
|
|
return -1;
|
|
}
|
|
/* Create the file world readable */
|
|
if ((file= my_create(path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0)
|
|
return file;
|
|
#ifdef HAVE_FCHMOD
|
|
(void) fchmod(file, 0666); // Because of umask()
|
|
#else
|
|
(void) chmod(path, 0666);
|
|
#endif
|
|
if (init_io_cache(cache, file, 0L, WRITE_CACHE, 0L, 1, MYF(MY_WME)))
|
|
{
|
|
my_close(file, MYF(0));
|
|
my_delete(path, MYF(0)); // Delete file on error, it was just created
|
|
return -1;
|
|
}
|
|
return file;
|
|
}
|
|
|
|
|
|
int
|
|
select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
|
|
{
|
|
bool blob_flag=0;
|
|
bool string_results= FALSE, non_string_results= FALSE;
|
|
unit= u;
|
|
if ((uint) strlen(exchange->file_name) + NAME_LEN >= FN_REFLEN)
|
|
strmake(path,exchange->file_name,FN_REFLEN-1);
|
|
|
|
write_cs= exchange->cs ? exchange->cs : &my_charset_bin;
|
|
|
|
if ((file= create_file(thd, path, exchange, &cache)) < 0)
|
|
return 1;
|
|
/* Check if there is any blobs in data */
|
|
{
|
|
List_iterator_fast<Item> li(list);
|
|
Item *item;
|
|
while ((item=li++))
|
|
{
|
|
if (item->max_length >= MAX_BLOB_WIDTH)
|
|
{
|
|
blob_flag=1;
|
|
break;
|
|
}
|
|
if (item->result_type() == STRING_RESULT)
|
|
string_results= TRUE;
|
|
else
|
|
non_string_results= TRUE;
|
|
}
|
|
}
|
|
if (exchange->escaped->numchars() > 1 || exchange->enclosed->numchars() > 1)
|
|
{
|
|
my_error(ER_WRONG_FIELD_TERMINATORS, MYF(0));
|
|
return TRUE;
|
|
}
|
|
if (exchange->escaped->length() > 1 || exchange->enclosed->length() > 1 ||
|
|
!my_isascii(exchange->escaped->ptr()[0]) ||
|
|
!my_isascii(exchange->enclosed->ptr()[0]) ||
|
|
!exchange->field_term->is_ascii() || !exchange->line_term->is_ascii() ||
|
|
!exchange->line_start->is_ascii())
|
|
{
|
|
/*
|
|
Current LOAD DATA INFILE recognizes field/line separators "as is" without
|
|
      converting from client charset to data file charset. So it is assumed
      that the input file of LOAD DATA INFILE consists of data in one charset
      and separators in another charset. For compatibility with that [buggy]
      behaviour the SELECT INTO OUTFILE implementation has been kept "as is" too,
      but a new warning message has been added:
|
|
|
|
Non-ASCII separator arguments are not fully supported
|
|
*/
|
|
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
|
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED,
|
|
ER(WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED));
|
|
}
|
|
field_term_length=exchange->field_term->length();
|
|
field_term_char= field_term_length ?
|
|
(int) (uchar) (*exchange->field_term)[0] : INT_MAX;
|
|
if (!exchange->line_term->length())
|
|
exchange->line_term=exchange->field_term; // Use this if it exists
|
|
field_sep_char= (exchange->enclosed->length() ?
|
|
(int) (uchar) (*exchange->enclosed)[0] : field_term_char);
|
|
if (exchange->escaped->length() && (exchange->escaped_given() ||
|
|
!(thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)))
|
|
escape_char= (int) (uchar) (*exchange->escaped)[0];
|
|
else
|
|
escape_char= -1;
|
|
is_ambiguous_field_sep= test(strchr(ESCAPE_CHARS, field_sep_char));
|
|
is_unsafe_field_sep= test(strchr(NUMERIC_CHARS, field_sep_char));
|
|
line_sep_char= (exchange->line_term->length() ?
|
|
(int) (uchar) (*exchange->line_term)[0] : INT_MAX);
|
|
if (!field_term_length)
|
|
exchange->opt_enclosed=0;
|
|
if (!exchange->enclosed->length())
|
|
exchange->opt_enclosed=1; // A little quicker loop
|
|
fixed_row_size= (!field_term_length && !exchange->enclosed->length() &&
|
|
!blob_flag);
|
|
if ((is_ambiguous_field_sep && exchange->enclosed->is_empty() &&
|
|
(string_results || is_unsafe_field_sep)) ||
|
|
(exchange->opt_enclosed && non_string_results &&
|
|
field_term_length && strchr(NUMERIC_CHARS, field_term_char)))
|
|
{
|
|
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
|
ER_AMBIGUOUS_FIELD_TERM, ER(ER_AMBIGUOUS_FIELD_TERM));
|
|
is_ambiguous_field_term= TRUE;
|
|
}
|
|
else
|
|
is_ambiguous_field_term= FALSE;
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
#define NEED_ESCAPING(x) ((int) (uchar) (x) == escape_char    || \
                          (enclosed ? (int) (uchar) (x) == field_sep_char \
                                    : (int) (uchar) (x) == field_term_char) || \
                          (int) (uchar) (x) == line_sep_char  || \
                          !(x))
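
/*
  Illustrative note (not part of the original sources): NEED_ESCAPING(x) is
  true when character x must be preceded by escape_char in the OUTFILE
  output.  With the defaults set up by sql_exchange above (field terminator
  '\t', line terminator '\n', escape '\\', no ENCLOSED BY), a tab, a newline,
  a backslash or a NUL byte in the data are all escaped; e.g. a NUL byte is
  written as the two characters '\' '0', and a literal tab as '\' followed
  by the tab character itself.
*/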
|
|
|
|
int select_export::send_data(List<Item> &items)
|
|
{
|
|
|
|
DBUG_ENTER("select_export::send_data");
|
|
char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH];
|
|
char cvt_buff[MAX_FIELD_WIDTH];
|
|
String cvt_str(cvt_buff, sizeof(cvt_buff), write_cs);
|
|
bool space_inited=0;
|
|
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
|
|
tmp.length(0);
|
|
|
|
if (unit->offset_limit_cnt)
|
|
{ // using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
row_count++;
|
|
Item *item;
|
|
uint used_length=0,items_left=items.elements;
|
|
List_iterator_fast<Item> li(items);
|
|
|
|
if (my_b_write(&cache,(uchar*) exchange->line_start->ptr(),
|
|
exchange->line_start->length()))
|
|
goto err;
|
|
while ((item=li++))
|
|
{
|
|
Item_result result_type=item->result_type();
|
|
bool enclosed = (exchange->enclosed->length() &&
|
|
(!exchange->opt_enclosed || result_type == STRING_RESULT));
|
|
res=item->str_result(&tmp);
|
|
if (res && !my_charset_same(write_cs, res->charset()) &&
|
|
!my_charset_same(write_cs, &my_charset_bin))
|
|
{
|
|
const char *well_formed_error_pos;
|
|
const char *cannot_convert_error_pos;
|
|
const char *from_end_pos;
|
|
const char *error_pos;
|
|
uint32 bytes;
|
|
uint64 estimated_bytes=
|
|
((uint64) res->length() / res->charset()->mbminlen + 1) *
|
|
write_cs->mbmaxlen + 1;
|
|
set_if_smaller(estimated_bytes, UINT_MAX32);
|
|
if (cvt_str.realloc((uint32) estimated_bytes))
|
|
{
|
|
my_error(ER_OUTOFMEMORY, MYF(0), (uint32) estimated_bytes);
|
|
goto err;
|
|
}
|
|
|
|
bytes= well_formed_copy_nchars(write_cs, (char *) cvt_str.ptr(),
|
|
cvt_str.alloced_length(),
|
|
res->charset(), res->ptr(), res->length(),
|
|
UINT_MAX32, // copy all input chars,
|
|
// i.e. ignore nchars parameter
|
|
&well_formed_error_pos,
|
|
&cannot_convert_error_pos,
|
|
&from_end_pos);
|
|
error_pos= well_formed_error_pos ? well_formed_error_pos
|
|
: cannot_convert_error_pos;
|
|
if (error_pos)
|
|
{
|
|
char printable_buff[32];
|
|
convert_to_printable(printable_buff, sizeof(printable_buff),
|
|
error_pos, res->ptr() + res->length() - error_pos,
|
|
res->charset(), 6);
|
|
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
|
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
|
|
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
|
|
"string", printable_buff,
|
|
item->name, (ulong) row_count);
|
|
}
|
|
else if (from_end_pos < res->ptr() + res->length())
|
|
{
|
|
/*
|
|
result is longer than UINT_MAX32 and doesn't fit into String
|
|
*/
|
|
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
|
WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED),
|
|
item->full_name(), row_count);
|
|
}
|
|
cvt_str.length(bytes);
|
|
res= &cvt_str;
|
|
}
|
|
if (res && enclosed)
|
|
{
|
|
if (my_b_write(&cache,(uchar*) exchange->enclosed->ptr(),
|
|
exchange->enclosed->length()))
|
|
goto err;
|
|
}
|
|
if (!res)
|
|
{ // NULL
|
|
if (!fixed_row_size)
|
|
{
|
|
if (escape_char != -1) // Use \N syntax
|
|
{
|
|
null_buff[0]=escape_char;
|
|
null_buff[1]='N';
|
|
if (my_b_write(&cache,(uchar*) null_buff,2))
|
|
goto err;
|
|
}
|
|
else if (my_b_write(&cache,(uchar*) "NULL",4))
|
|
goto err;
|
|
}
|
|
else
|
|
{
|
|
used_length=0; // Fill with space
|
|
}
|
|
}
|
|
else
|
|
{
|
|
if (fixed_row_size)
|
|
used_length=min(res->length(),item->max_length);
|
|
else
|
|
used_length=res->length();
|
|
if ((result_type == STRING_RESULT || is_unsafe_field_sep) &&
|
|
escape_char != -1)
|
|
{
|
|
char *pos, *start, *end;
|
|
CHARSET_INFO *res_charset= res->charset();
|
|
CHARSET_INFO *character_set_client= thd->variables.
|
|
character_set_client;
|
|
bool check_second_byte= (res_charset == &my_charset_bin) &&
|
|
character_set_client->
|
|
escape_with_backslash_is_dangerous;
|
|
DBUG_ASSERT(character_set_client->mbmaxlen == 2 ||
|
|
!character_set_client->escape_with_backslash_is_dangerous);
|
|
for (start=pos=(char*) res->ptr(),end=pos+used_length ;
|
|
pos != end ;
|
|
pos++)
|
|
{
|
|
#ifdef USE_MB
|
|
if (use_mb(res_charset))
|
|
{
|
|
int l;
|
|
if ((l=my_ismbchar(res_charset, pos, end)))
|
|
{
|
|
pos += l-1;
|
|
continue;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
Special case when dumping BINARY/VARBINARY/BLOB values
|
|
for the clients with character sets big5, cp932, gbk and sjis,
|
|
which can have the escape character (0x5C "\" by default)
|
|
as the second byte of a multi-byte sequence.
|
|
|
|
If
|
|
- pos[0] is a valid multi-byte head (e.g 0xEE) and
|
|
- pos[1] is 0x00, which will be escaped as "\0",
|
|
|
|
then we'll get "0xEE + 0x5C + 0x30" in the output file.
|
|
|
|
If this file is later loaded using this sequence of commands:
|
|
|
|
mysql> create table t1 (a varchar(128)) character set big5;
|
|
mysql> LOAD DATA INFILE 'dump.txt' INTO TABLE t1;
|
|
|
|
then 0x5C will be misinterpreted as the second byte
|
|
of a multi-byte character "0xEE + 0x5C", instead of
|
|
escape character for 0x00.
|
|
|
|
To avoid this confusion, we'll escape the multi-byte
|
|
head character too, so the sequence "0xEE + 0x00" will be
|
|
dumped as "0x5C + 0xEE + 0x5C + 0x30".
|
|
|
|
Note, in the condition below we only check if
|
|
mbcharlen is equal to 2, because there are no
|
|
character sets with mbmaxlen longer than 2
|
|
and with escape_with_backslash_is_dangerous set.
|
|
DBUG_ASSERT before the loop makes that sure.
|
|
*/
|
|
|
|
if ((NEED_ESCAPING(*pos) ||
|
|
(check_second_byte &&
|
|
my_mbcharlen(character_set_client, (uchar) *pos) == 2 &&
|
|
pos + 1 < end &&
|
|
NEED_ESCAPING(pos[1]))) &&
|
|
/*
|
|
Don't escape field_term_char by doubling - doubling is only
|
|
valid for ENCLOSED BY characters:
|
|
*/
|
|
(enclosed || !is_ambiguous_field_term ||
|
|
(int) (uchar) *pos != field_term_char))
|
|
{
|
|
char tmp_buff[2];
|
|
tmp_buff[0]= ((int) (uchar) *pos == field_sep_char &&
|
|
is_ambiguous_field_sep) ?
|
|
field_sep_char : escape_char;
|
|
tmp_buff[1]= *pos ? *pos : '0';
|
|
if (my_b_write(&cache,(uchar*) start,(uint) (pos-start)) ||
|
|
my_b_write(&cache,(uchar*) tmp_buff,2))
|
|
goto err;
|
|
start=pos+1;
|
|
}
|
|
}
|
|
if (my_b_write(&cache,(uchar*) start,(uint) (pos-start)))
|
|
goto err;
|
|
}
|
|
else if (my_b_write(&cache,(uchar*) res->ptr(),used_length))
|
|
goto err;
|
|
}
|
|
if (fixed_row_size)
|
|
{ // Fill with space
|
|
if (item->max_length > used_length)
|
|
{
|
|
if (!space_inited)
|
|
{
|
|
space_inited=1;
|
|
bfill(space,sizeof(space),' ');
|
|
}
|
|
uint length=item->max_length-used_length;
|
|
for (; length > sizeof(space) ; length-=sizeof(space))
|
|
{
|
|
if (my_b_write(&cache,(uchar*) space,sizeof(space)))
|
|
goto err;
|
|
}
|
|
if (my_b_write(&cache,(uchar*) space,length))
|
|
goto err;
|
|
}
|
|
}
|
|
if (res && enclosed)
|
|
{
|
|
if (my_b_write(&cache, (uchar*) exchange->enclosed->ptr(),
|
|
exchange->enclosed->length()))
|
|
goto err;
|
|
}
|
|
if (--items_left)
|
|
{
|
|
if (my_b_write(&cache, (uchar*) exchange->field_term->ptr(),
|
|
field_term_length))
|
|
goto err;
|
|
}
|
|
}
|
|
if (my_b_write(&cache,(uchar*) exchange->line_term->ptr(),
|
|
exchange->line_term->length()))
|
|
goto err;
|
|
DBUG_RETURN(0);
|
|
err:
|
|
DBUG_RETURN(1);
|
|
}
|
|
|
|
|
|
/***************************************************************************
|
|
** Dump of select to a binary file
|
|
***************************************************************************/
|
|
|
|
|
|
int
|
|
select_dump::prepare(List<Item> &list __attribute__((unused)),
|
|
SELECT_LEX_UNIT *u)
|
|
{
|
|
unit= u;
|
|
return (int) ((file= create_file(thd, path, exchange, &cache)) < 0);
|
|
}
|
|
|
|
|
|
int select_dump::send_data(List<Item> &items)
|
|
{
|
|
List_iterator_fast<Item> li(items);
|
|
char buff[MAX_FIELD_WIDTH];
|
|
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
|
|
tmp.length(0);
|
|
Item *item;
|
|
DBUG_ENTER("select_dump::send_data");
|
|
|
|
if (unit->offset_limit_cnt)
|
|
{ // using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
if (row_count++ > 1)
|
|
{
|
|
my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
|
|
goto err;
|
|
}
|
|
while ((item=li++))
|
|
{
|
|
res=item->str_result(&tmp);
|
|
if (!res) // If NULL
|
|
{
|
|
if (my_b_write(&cache,(uchar*) "",1))
|
|
goto err;
|
|
}
|
|
else if (my_b_write(&cache,(uchar*) res->ptr(),res->length()))
|
|
{
|
|
my_error(ER_ERROR_ON_WRITE, MYF(0), path, my_errno);
|
|
goto err;
|
|
}
|
|
}
|
|
DBUG_RETURN(0);
|
|
err:
|
|
DBUG_RETURN(1);
|
|
}
|
|
|
|
|
|
select_subselect::select_subselect(Item_subselect *item_arg)
|
|
{
|
|
item= item_arg;
|
|
}
|
|
|
|
|
|
int select_singlerow_subselect::send_data(List<Item> &items)
|
|
{
|
|
DBUG_ENTER("select_singlerow_subselect::send_data");
|
|
Item_singlerow_subselect *it= (Item_singlerow_subselect *)item;
|
|
if (it->assigned())
|
|
{
|
|
my_message(ER_SUBQUERY_NO_1_ROW, ER(ER_SUBQUERY_NO_1_ROW), MYF(0));
|
|
DBUG_RETURN(1);
|
|
}
|
|
if (unit->offset_limit_cnt)
|
|
{ // Using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
List_iterator_fast<Item> li(items);
|
|
Item *val_item;
|
|
for (uint i= 0; (val_item= li++); i++)
|
|
it->store(i, val_item);
|
|
it->assigned(1);
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
|
|
void select_max_min_finder_subselect::cleanup()
|
|
{
|
|
DBUG_ENTER("select_max_min_finder_subselect::cleanup");
|
|
cache= 0;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
int select_max_min_finder_subselect::send_data(List<Item> &items)
|
|
{
|
|
DBUG_ENTER("select_max_min_finder_subselect::send_data");
|
|
Item_maxmin_subselect *it= (Item_maxmin_subselect *)item;
|
|
List_iterator_fast<Item> li(items);
|
|
Item *val_item= li++;
|
|
it->register_value();
|
|
if (it->assigned())
|
|
{
|
|
cache->store(val_item);
|
|
if ((this->*op)())
|
|
it->store(0, cache);
|
|
}
|
|
else
|
|
{
|
|
if (!cache)
|
|
{
|
|
cache= Item_cache::get_cache(val_item);
|
|
switch (val_item->result_type()) {
|
|
case REAL_RESULT:
|
|
op= &select_max_min_finder_subselect::cmp_real;
|
|
break;
|
|
case INT_RESULT:
|
|
op= &select_max_min_finder_subselect::cmp_int;
|
|
break;
|
|
case STRING_RESULT:
|
|
op= &select_max_min_finder_subselect::cmp_str;
|
|
break;
|
|
case DECIMAL_RESULT:
|
|
op= &select_max_min_finder_subselect::cmp_decimal;
|
|
break;
|
|
case ROW_RESULT:
|
|
case TIME_RESULT:
|
|
case IMPOSSIBLE_RESULT:
|
|
        // This case should never be chosen
|
|
DBUG_ASSERT(0);
|
|
op= 0;
|
|
}
|
|
}
|
|
cache->store(val_item);
|
|
it->store(0, cache);
|
|
}
|
|
it->assigned(1);
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
bool select_max_min_finder_subselect::cmp_real()
|
|
{
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
|
double val1= cache->val_real(), val2= maxmin->val_real();
|
|
|
|
/* Ignore NULLs for ANY and keep them for ALL subqueries */
|
|
if (cache->null_value)
|
|
return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
|
|
if (maxmin->null_value)
|
|
return !is_all;
|
|
|
|
if (fmax)
|
|
return(val1 > val2);
|
|
return (val1 < val2);
|
|
}
|
|
|
|
bool select_max_min_finder_subselect::cmp_int()
|
|
{
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
|
longlong val1= cache->val_int(), val2= maxmin->val_int();
|
|
|
|
/* Ignore NULLs for ANY and keep them for ALL subqueries */
|
|
if (cache->null_value)
|
|
return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
|
|
if (maxmin->null_value)
|
|
return !is_all;
|
|
|
|
if (fmax)
|
|
return(val1 > val2);
|
|
return (val1 < val2);
|
|
}
|
|
|
|
bool select_max_min_finder_subselect::cmp_decimal()
|
|
{
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
|
my_decimal cval, *cvalue= cache->val_decimal(&cval);
|
|
my_decimal mval, *mvalue= maxmin->val_decimal(&mval);
|
|
|
|
/* Ignore NULLs for ANY and keep them for ALL subqueries */
|
|
if (cache->null_value)
|
|
return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
|
|
if (maxmin->null_value)
|
|
return !is_all;
|
|
|
|
if (fmax)
|
|
return (my_decimal_cmp(cvalue, mvalue) > 0) ;
|
|
return (my_decimal_cmp(cvalue,mvalue) < 0);
|
|
}
|
|
|
|
bool select_max_min_finder_subselect::cmp_str()
|
|
{
|
|
String *val1, *val2, buf1, buf2;
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
|
  /*
    As far as both operands are Item_cache objects, buf1 & buf2 will not
    be used, but they are added for safety.
  */
  val1= cache->val_str(&buf1);
  val2= maxmin->val_str(&buf2);
|
|
|
|
/* Ignore NULLs for ANY and keep them for ALL subqueries */
|
|
if (cache->null_value)
|
|
return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
|
|
if (maxmin->null_value)
|
|
return !is_all;
|
|
|
|
if (fmax)
|
|
return (sortcmp(val1, val2, cache->collation.collation) > 0) ;
|
|
return (sortcmp(val1, val2, cache->collation.collation) < 0);
|
|
}
|
|
|
|
int select_exists_subselect::send_data(List<Item> &items)
|
|
{
|
|
DBUG_ENTER("select_exists_subselect::send_data");
|
|
Item_exists_subselect *it= (Item_exists_subselect *)item;
|
|
if (unit->offset_limit_cnt)
|
|
{ // Using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
it->value= 1;
|
|
it->assigned(1);
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
|
|
/***************************************************************************
|
|
Dump of select to variables
|
|
***************************************************************************/
|
|
|
|
int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
|
|
{
|
|
unit= u;
|
|
|
|
if (var_list.elements != list.elements)
|
|
{
|
|
my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT,
|
|
ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0));
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
|
|
bool select_dumpvar::check_simple_select() const
|
|
{
|
|
my_error(ER_SP_BAD_CURSOR_SELECT, MYF(0));
|
|
return TRUE;
|
|
}
|
|
|
|
|
|
void select_dumpvar::cleanup()
|
|
{
|
|
row_count= 0;
|
|
}
|
|
|
|
|
|
Query_arena::Type Query_arena::type() const
|
|
{
|
|
DBUG_ASSERT(0); /* Should never be called */
|
|
return STATEMENT;
|
|
}
|
|
|
|
|
|
void Query_arena::free_items()
|
|
{
|
|
Item *next;
|
|
DBUG_ENTER("Query_arena::free_items");
|
|
/* This works because items are allocated with sql_alloc() */
|
|
for (; free_list; free_list= next)
|
|
{
|
|
next= free_list->next;
|
|
DBUG_ASSERT(free_list != next);
|
|
free_list->delete_self();
|
|
}
|
|
/* Postcondition: free_list is 0 */
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void Query_arena::set_query_arena(Query_arena *set)
|
|
{
|
|
mem_root= set->mem_root;
|
|
free_list= set->free_list;
|
|
state= set->state;
|
|
}
|
|
|
|
|
|
void Query_arena::cleanup_stmt()
|
|
{
|
|
DBUG_ASSERT(! "Query_arena::cleanup_stmt() not implemented");
|
|
}
|
|
|
|
/*
|
|
Statement functions
|
|
*/
|
|
|
|
Statement::Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg,
|
|
enum enum_state state_arg, ulong id_arg)
|
|
:Query_arena(mem_root_arg, state_arg),
|
|
id(id_arg),
|
|
mark_used_columns(MARK_COLUMNS_READ),
|
|
lex(lex_arg),
|
|
cursor(0),
|
|
db(NULL),
|
|
db_length(0)
|
|
{
|
|
query_string.length= 0;
|
|
query_string.str= NULL;
|
|
name.str= NULL;
|
|
}
|
|
|
|
|
|
Query_arena::Type Statement::type() const
|
|
{
|
|
return STATEMENT;
|
|
}
|
|
|
|
|
|
void Statement::set_statement(Statement *stmt)
|
|
{
|
|
id= stmt->id;
|
|
mark_used_columns= stmt->mark_used_columns;
|
|
lex= stmt->lex;
|
|
query_string= stmt->query_string;
|
|
cursor= stmt->cursor;
|
|
}
|
|
|
|
|
|
void
|
|
Statement::set_n_backup_statement(Statement *stmt, Statement *backup)
|
|
{
|
|
DBUG_ENTER("Statement::set_n_backup_statement");
|
|
backup->set_statement(this);
|
|
set_statement(stmt);
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void Statement::restore_backup_statement(Statement *stmt, Statement *backup)
|
|
{
|
|
DBUG_ENTER("Statement::restore_backup_statement");
|
|
stmt->set_statement(this);
|
|
set_statement(backup);
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/** Assign a new value to thd->query. */
|
|
|
|
void Statement::set_query_inner(char *query_arg, uint32 query_length_arg)
|
|
{
|
|
query_string.str= query_arg;
|
|
query_string.length= query_length_arg;
|
|
}
|
|
|
|
|
|
void THD::end_statement()
|
|
{
|
|
/* Cleanup SQL processing state to reuse this statement in next query. */
|
|
lex_end(lex);
|
|
delete lex->result;
|
|
lex->result= 0;
|
|
/* Note that free_list is freed in cleanup_after_query() */
|
|
|
|
/*
|
|
Don't free mem_root, as mem_root is freed in the end of dispatch_command
|
|
(once for any command).
|
|
*/
|
|
}
|
|
|
|
|
|
void THD::set_n_backup_active_arena(Query_arena *set, Query_arena *backup)
|
|
{
|
|
DBUG_ENTER("THD::set_n_backup_active_arena");
|
|
DBUG_ASSERT(backup->is_backup_arena == FALSE);
|
|
|
|
backup->set_query_arena(this);
|
|
set_query_arena(set);
|
|
#ifndef DBUG_OFF
|
|
backup->is_backup_arena= TRUE;
|
|
#endif
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void THD::restore_active_arena(Query_arena *set, Query_arena *backup)
|
|
{
|
|
DBUG_ENTER("THD::restore_active_arena");
|
|
DBUG_ASSERT(backup->is_backup_arena);
|
|
set->set_query_arena(this);
|
|
set_query_arena(backup);
|
|
#ifndef DBUG_OFF
|
|
backup->is_backup_arena= FALSE;
|
|
#endif
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
Statement::~Statement()
|
|
{
|
|
}
|
|
|
|
C_MODE_START
|
|
|
|
static uchar *
|
|
get_statement_id_as_hash_key(const uchar *record, size_t *key_length,
|
|
my_bool not_used __attribute__((unused)))
|
|
{
|
|
const Statement *statement= (const Statement *) record;
|
|
*key_length= sizeof(statement->id);
|
|
return (uchar *) &((const Statement *) statement)->id;
|
|
}
|
|
|
|
static void delete_statement_as_hash_key(void *key)
|
|
{
|
|
delete (Statement *) key;
|
|
}
|
|
|
|
static uchar *get_stmt_name_hash_key(Statement *entry, size_t *length,
|
|
my_bool not_used __attribute__((unused)))
|
|
{
|
|
*length= entry->name.length;
|
|
return (uchar*) entry->name.str;
|
|
}
|
|
|
|
C_MODE_END
|
|
|
|
Statement_map::Statement_map() :
|
|
last_found_statement(0)
|
|
{
|
|
enum
|
|
{
|
|
START_STMT_HASH_SIZE = 16,
|
|
START_NAME_HASH_SIZE = 16
|
|
};
|
|
hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0,
|
|
get_statement_id_as_hash_key,
|
|
delete_statement_as_hash_key, MYF(0));
|
|
hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
|
|
(hash_get_key) get_stmt_name_hash_key,
|
|
NULL,MYF(0));
|
|
}
|
|
|
|
|
|
/*
  Insert a new statement to the thread-local statement map.

  DESCRIPTION
    If there was an old statement with the same name, replace it with the
    new one. Otherwise, check if max_prepared_stmt_count is not reached yet,
    increase prepared_stmt_count, and insert the new statement. It's okay
    to delete an old statement and fail to insert the new one.

  POSTCONDITIONS
    All named prepared statements are also present in names_hash.
    Statement names in names_hash are unique.
    The statement is added only if prepared_stmt_count < max_prepared_stmt_count.
    last_found_statement always points to a valid statement or is 0.

  RETURN VALUE
    0  success
    1  error: out of resources or max_prepared_stmt_count limit has been
       reached. An error is sent to the client, the statement is deleted.
*/
|
|
|
|
int Statement_map::insert(THD *thd, Statement *statement)
|
|
{
|
|
if (my_hash_insert(&st_hash, (uchar*) statement))
|
|
{
|
|
/*
|
|
Delete is needed only in case of an insert failure. In all other
|
|
cases hash_delete will also delete the statement.
|
|
*/
|
|
delete statement;
|
|
my_error(ER_OUT_OF_RESOURCES, MYF(0));
|
|
goto err_st_hash;
|
|
}
|
|
if (statement->name.str && my_hash_insert(&names_hash, (uchar*) statement))
|
|
{
|
|
my_error(ER_OUT_OF_RESOURCES, MYF(0));
|
|
goto err_names_hash;
|
|
}
|
|
pthread_mutex_lock(&LOCK_prepared_stmt_count);
|
|
/*
|
|
We don't check that prepared_stmt_count is <= max_prepared_stmt_count
|
|
    because we would like to allow lowering the total limit
|
|
of prepared statements below the current count. In that case
|
|
no new statements can be added until prepared_stmt_count drops below
|
|
the limit.
|
|
*/
|
|
if (prepared_stmt_count >= max_prepared_stmt_count)
|
|
{
|
|
pthread_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
my_error(ER_MAX_PREPARED_STMT_COUNT_REACHED, MYF(0),
|
|
max_prepared_stmt_count);
|
|
goto err_max;
|
|
}
|
|
prepared_stmt_count++;
|
|
pthread_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
|
|
last_found_statement= statement;
|
|
return 0;
|
|
|
|
err_max:
|
|
if (statement->name.str)
|
|
hash_delete(&names_hash, (uchar*) statement);
|
|
err_names_hash:
|
|
hash_delete(&st_hash, (uchar*) statement);
|
|
err_st_hash:
|
|
return 1;
|
|
}
|
|
|
|
|
|
void Statement_map::close_transient_cursors()
|
|
{
|
|
#ifdef TO_BE_IMPLEMENTED
|
|
Statement *stmt;
|
|
while ((stmt= transient_cursor_list.head()))
|
|
stmt->close_cursor(); /* deletes itself from the list */
|
|
#endif
|
|
}
|
|
|
|
|
|
void Statement_map::erase(Statement *statement)
|
|
{
|
|
if (statement == last_found_statement)
|
|
last_found_statement= 0;
|
|
if (statement->name.str)
|
|
hash_delete(&names_hash, (uchar *) statement);
|
|
|
|
hash_delete(&st_hash, (uchar *) statement);
|
|
pthread_mutex_lock(&LOCK_prepared_stmt_count);
|
|
DBUG_ASSERT(prepared_stmt_count > 0);
|
|
prepared_stmt_count--;
|
|
pthread_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
}
|
|
|
|
|
|
void Statement_map::reset()
|
|
{
|
|
/* Must be first, hash_free will reset st_hash.records */
|
|
pthread_mutex_lock(&LOCK_prepared_stmt_count);
|
|
DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
|
|
prepared_stmt_count-= st_hash.records;
|
|
pthread_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
|
|
my_hash_reset(&names_hash);
|
|
my_hash_reset(&st_hash);
|
|
last_found_statement= 0;
|
|
}
|
|
|
|
|
|
Statement_map::~Statement_map()
|
|
{
|
|
/* Must go first, hash_free will reset st_hash.records */
|
|
pthread_mutex_lock(&LOCK_prepared_stmt_count);
|
|
DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
|
|
prepared_stmt_count-= st_hash.records;
|
|
pthread_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
|
|
hash_free(&names_hash);
|
|
hash_free(&st_hash);
|
|
}
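
/*
  Illustrative note (not part of the original sources): Statement_map is the
  per-connection registry of prepared statements.  insert() adds a statement
  subject to the global max_prepared_stmt_count limit, erase() removes a
  single statement (e.g. on DEALLOCATE PREPARE), while reset() and the
  destructor drop the whole map when the connection is reset or closed,
  adjusting the global prepared_stmt_count in all cases.
*/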
|
|
|
|
int select_dumpvar::send_data(List<Item> &items)
|
|
{
|
|
List_iterator_fast<my_var> var_li(var_list);
|
|
List_iterator<Item> it(items);
|
|
Item *item;
|
|
my_var *mv;
|
|
DBUG_ENTER("select_dumpvar::send_data");
|
|
|
|
if (unit->offset_limit_cnt)
|
|
{ // using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
if (row_count++)
|
|
{
|
|
my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
|
|
DBUG_RETURN(1);
|
|
}
|
|
while ((mv= var_li++) && (item= it++))
|
|
{
|
|
if (mv->local)
|
|
{
|
|
if (thd->spcont->set_variable(thd, mv->offset, &item))
|
|
DBUG_RETURN(1);
|
|
}
|
|
else
|
|
{
|
|
Item_func_set_user_var *suv= new Item_func_set_user_var(mv->s, item);
|
|
if (suv->fix_fields(thd, 0))
|
|
DBUG_RETURN (1);
|
|
suv->save_item_result(item);
|
|
if (suv->update())
|
|
DBUG_RETURN (1);
|
|
}
|
|
}
|
|
DBUG_RETURN(thd->is_error());
|
|
}
|
|
|
|
bool select_dumpvar::send_eof()
|
|
{
|
|
if (! row_count)
|
|
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
|
|
ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA));
|
|
/*
|
|
In order to remember the value of affected rows for ROW_COUNT()
|
|
    function, SELECT INTO has to have its own SQLCOM.
|
|
TODO: split from SQLCOM_SELECT
|
|
*/
|
|
::my_ok(thd,row_count);
|
|
return 0;
|
|
}
|
|
|
|
|
|
bool
|
|
select_materialize_with_stats::
|
|
create_result_table(THD *thd_arg, List<Item> *column_types,
|
|
bool is_union_distinct, ulonglong options,
|
|
const char *table_alias, bool bit_fields_as_long,
|
|
bool create_table)
|
|
{
|
|
DBUG_ASSERT(table == 0);
|
|
tmp_table_param.field_count= column_types->elements;
|
|
tmp_table_param.bit_fields_as_long= bit_fields_as_long;
|
|
|
|
if (! (table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
|
|
(ORDER*) 0, is_union_distinct, 1,
|
|
options, HA_POS_ERROR, (char*) table_alias)))
|
|
return TRUE;
|
|
|
|
col_stat= (Column_statistics*) table->in_use->alloc(table->s->fields *
|
|
sizeof(Column_statistics));
|
|
if (!col_stat)
|
|
return TRUE;
|
|
|
|
reset();
|
|
table->file->extra(HA_EXTRA_WRITE_CACHE);
|
|
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
|
|
return FALSE;
|
|
}
|
|
|
|
|
|
void select_materialize_with_stats::reset()
|
|
{
|
|
memset(col_stat, 0, table->s->fields * sizeof(Column_statistics));
|
|
max_nulls_in_row= 0;
|
|
count_rows= 0;
|
|
}
|
|
|
|
|
|
void select_materialize_with_stats::cleanup()
|
|
{
|
|
reset();
|
|
select_union::cleanup();
|
|
}
|
|
|
|
|
|
/**
|
|
Override select_union::send_data to analyze each row for NULLs and to
|
|
update null_statistics before sending data to the client.
|
|
|
|
@return TRUE if fatal error when sending data to the client
|
|
@return FALSE on success
|
|
*/
|
|
|
|
int select_materialize_with_stats::send_data(List<Item> &items)
|
|
{
|
|
List_iterator_fast<Item> item_it(items);
|
|
Item *cur_item;
|
|
Column_statistics *cur_col_stat= col_stat;
|
|
uint nulls_in_row= 0;
|
|
int res;
|
|
|
|
if ((res= select_union::send_data(items)))
|
|
return res;
|
|
/* Skip duplicate rows. */
|
|
if (write_err == HA_ERR_FOUND_DUPP_KEY ||
|
|
write_err == HA_ERR_FOUND_DUPP_UNIQUE)
|
|
return 0;
|
|
|
|
++count_rows;
|
|
|
|
while ((cur_item= item_it++))
|
|
{
|
|
if (cur_item->is_null_result())
|
|
{
|
|
++cur_col_stat->null_count;
|
|
cur_col_stat->max_null_row= count_rows;
|
|
if (!cur_col_stat->min_null_row)
|
|
cur_col_stat->min_null_row= count_rows;
|
|
++nulls_in_row;
|
|
}
|
|
++cur_col_stat;
|
|
}
|
|
if (nulls_in_row > max_nulls_in_row)
|
|
max_nulls_in_row= nulls_in_row;
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
/****************************************************************************
|
|
TMP_TABLE_PARAM
|
|
****************************************************************************/
|
|
|
|
void TMP_TABLE_PARAM::init()
|
|
{
|
|
DBUG_ENTER("TMP_TABLE_PARAM::init");
|
|
DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this));
|
|
field_count= sum_func_count= func_count= hidden_field_count= 0;
|
|
group_parts= group_length= group_null_parts= 0;
|
|
quick_group= 1;
|
|
table_charset= 0;
|
|
precomputed_group_by= 0;
|
|
bit_fields_as_long= 0;
|
|
materialized_subquery= 0;
|
|
skip_create_table= 0;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void thd_increment_bytes_sent(ulong length)
|
|
{
|
|
THD *thd=current_thd;
|
|
if (likely(thd != 0))
|
|
{
|
|
/* current_thd == 0 when close_connection() calls net_send_error() */
|
|
thd->status_var.bytes_sent+= length;
|
|
}
|
|
}
|
|
|
|
|
|
void thd_increment_bytes_received(ulong length)
|
|
{
|
|
current_thd->status_var.bytes_received+= length;
|
|
}
|
|
|
|
|
|
void thd_increment_net_big_packet_count(ulong length)
|
|
{
|
|
current_thd->status_var.net_big_packet_count+= length;
|
|
}
|
|
|
|
|
|
void THD::set_status_var_init()
|
|
{
|
|
bzero((char*) &status_var, sizeof(status_var));
|
|
}
|
|
|
|
|
|
void Security_context::init()
|
|
{
|
|
host= user= ip= 0;
|
|
host_or_ip= "connecting host";
|
|
priv_user[0]= priv_host[0]= '\0';
|
|
master_access= 0;
|
|
#ifndef NO_EMBEDDED_ACCESS_CHECKS
|
|
db_access= NO_ACCESS;
|
|
#endif
|
|
}
|
|
|
|
|
|
void Security_context::destroy()
|
|
{
|
|
// If not pointer to constant
|
|
if (host != my_localhost)
|
|
safeFree(host);
|
|
if (user != delayed_user)
|
|
safeFree(user);
|
|
safeFree(ip);
|
|
}
|
|
|
|
|
|
void Security_context::skip_grants()
|
|
{
|
|
  /* privileges for the user are unknown; everything is allowed */
|
|
host_or_ip= (char *)"";
|
|
master_access= ~NO_ACCESS;
|
|
*priv_user= *priv_host= '\0';
|
|
}
|
|
|
|
|
|
bool Security_context::set_user(char *user_arg)
|
|
{
|
|
safeFree(user);
|
|
user= my_strdup(user_arg, MYF(0));
|
|
return user == 0;
|
|
}
|
|
|
|
#ifndef NO_EMBEDDED_ACCESS_CHECKS
|
|
/**
|
|
Initialize this security context from the passed in credentials
|
|
and activate it in the current thread.
|
|
|
|
@param thd
|
|
@param definer_user
|
|
@param definer_host
|
|
@param db
|
|
@param[out] backup Save a pointer to the current security context
|
|
in the thread. In case of success it points to the
|
|
saved old context, otherwise it points to NULL.
|
|
|
|
|
|
During execution of a statement, multiple security contexts may
|
|
be needed:
|
|
- the security context of the authenticated user, used as the
|
|
default security context for all top-level statements
|
|
- in case of a view or a stored program, possibly the security
|
|
context of the definer of the routine, if the object is
|
|
defined with SQL SECURITY DEFINER option.
|
|
|
|
The currently "active" security context is parameterized in THD
|
|
member security_ctx. By default, after a connection is
|
|
established, this member points at the "main" security context
|
|
- the credentials of the authenticated user.
|
|
|
|
Later, if we would like to execute some sub-statement or a part
|
|
of a statement under credentials of a different user, e.g.
|
|
definer of a procedure, we authenticate this user in a local
|
|
instance of Security_context by means of this method (and
|
|
ultimately by means of acl_getroot), and make the
|
|
local instance active in the thread by re-setting
|
|
thd->security_ctx pointer.
|
|
|
|
Note, that the life cycle and memory management of the "main" and
|
|
temporary security contexts are different.
|
|
For the main security context, the memory for user/host/ip is
|
|
allocated on system heap, and the THD class frees this memory in
|
|
its destructor. The only case when contents of the main security
|
|
context may change during its life time is when someone issued
|
|
CHANGE USER command.
|
|
Memory management of a "temporary" security context is
|
|
responsibility of the module that creates it.
|
|
|
|
  @retval TRUE  there is no user with the given credentials. The error
                is reported in the thread.
|
|
@retval FALSE success
|
|
*/
|
|
|
|
bool
|
|
Security_context::
|
|
change_security_context(THD *thd,
|
|
LEX_STRING *definer_user,
|
|
LEX_STRING *definer_host,
|
|
LEX_STRING *db,
|
|
Security_context **backup)
|
|
{
|
|
bool needs_change;
|
|
|
|
DBUG_ENTER("Security_context::change_security_context");
|
|
|
|
DBUG_ASSERT(definer_user->str && definer_host->str);
|
|
|
|
*backup= NULL;
|
|
needs_change= (strcmp(definer_user->str, thd->security_ctx->priv_user) ||
|
|
my_strcasecmp(system_charset_info, definer_host->str,
|
|
thd->security_ctx->priv_host));
|
|
if (needs_change)
|
|
{
|
|
if (acl_getroot(this, definer_user->str, definer_host->str,
|
|
definer_host->str, db->str))
|
|
{
|
|
my_error(ER_NO_SUCH_USER, MYF(0), definer_user->str,
|
|
definer_host->str);
|
|
DBUG_RETURN(TRUE);
|
|
}
|
|
*backup= thd->security_ctx;
|
|
thd->security_ctx= this;
|
|
}
|
|
|
|
DBUG_RETURN(FALSE);
|
|
}
|
|
|
|
|
|
void
|
|
Security_context::restore_security_context(THD *thd,
|
|
Security_context *backup)
|
|
{
|
|
if (backup)
|
|
thd->security_ctx= backup;
|
|
}
|
|
#endif
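
/*
  Usage sketch (illustrative only, not part of the original sources): running
  part of a statement under the credentials of a routine or view definer
  typically follows this pattern (all variable names are placeholders):

    Security_context definer_ctx, *saved_ctx;
    if (definer_ctx.change_security_context(thd, &definer_user, &definer_host,
                                            &db_name, &saved_ctx))
      return TRUE;                       // no such user, error already sent
    ...                                  // run the SQL SECURITY DEFINER part
    definer_ctx.restore_security_context(thd, saved_ctx);
*/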
|
|
|
|
|
|
bool Security_context::user_matches(Security_context *them)
|
|
{
|
|
return ((user != NULL) && (them->user != NULL) &&
|
|
!strcmp(user, them->user));
|
|
}
|
|
|
|
|
|
/****************************************************************************
|
|
Handling of open and locked tables states.
|
|
|
|
This is used when we want to open/lock (and then close) some tables when
|
|
we already have a set of tables open and locked. We use these methods for
|
|
access to mysql.proc table to find definitions of stored routines.
|
|
****************************************************************************/
|
|
|
|
void THD::reset_n_backup_open_tables_state(Open_tables_state *backup)
|
|
{
|
|
DBUG_ENTER("reset_n_backup_open_tables_state");
|
|
backup->set_open_tables_state(this);
|
|
reset_open_tables_state();
|
|
state_flags|= Open_tables_state::BACKUPS_AVAIL;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void THD::restore_backup_open_tables_state(Open_tables_state *backup)
|
|
{
|
|
DBUG_ENTER("restore_backup_open_tables_state");
|
|
/*
|
|
    Before we throw away the current open tables state, we want
    to be sure that it was properly cleaned up.
|
|
*/
|
|
DBUG_ASSERT(open_tables == 0 && temporary_tables == 0 &&
|
|
handler_tables == 0 && derived_tables == 0 &&
|
|
lock == 0 && locked_tables == 0 &&
|
|
prelocked_mode == NON_PRELOCKED &&
|
|
m_reprepare_observer == NULL);
|
|
set_open_tables_state(backup);
|
|
DBUG_VOID_RETURN;
|
|
}
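
/*
  Usage sketch (illustrative only, not part of the original sources): the pair
  of functions above is used when the server must open extra tables (for
  example mysql.proc) while the statement already has its own tables open and
  locked:

    Open_tables_state backup;
    thd->reset_n_backup_open_tables_state(&backup);
    ...                                  // open, use and close the extra tables
    thd->restore_backup_open_tables_state(&backup);
*/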
|
|
|
|
/**
|
|
Check the killed state of a user thread
|
|
@param thd user thread
|
|
@retval 0 the user thread is active
|
|
@retval 1 the user thread has been killed
|
|
|
|
This is used to signal a storage engine if it should be killed.
|
|
*/
|
|
|
|
extern "C" int thd_killed(const MYSQL_THD thd)
|
|
{
|
|
if (!(thd->killed & KILL_HARD_BIT))
|
|
return 0;
|
|
return thd->killed;
|
|
}
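
/*
  Illustrative note (not part of the original sources): storage engines are
  expected to poll thd_killed() from long-running loops, e.g. once per
  scanned row or block:

    if (thd_killed(thd))
      break;                             // stop and return an "interrupted" error

  As the code above shows, only kill signals that have KILL_HARD_BIT set are
  reported to the engine.
*/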
|
|
|
|
|
|
/**
|
|
Send an out-of-band progress report to the client
|
|
|
|
  The report is sent every 'thd->...progress_report_time' seconds,
|
|
however not more often than global.progress_report_time.
|
|
If global.progress_report_time is 0, then don't send progress reports, but
|
|
check every second if the value has changed
|
|
*/
|
|
|
|
static void thd_send_progress(THD *thd)
|
|
{
|
|
/* Check if we should send the client a progress report */
|
|
ulonglong report_time= my_interval_timer();
|
|
if (report_time > thd->progress.next_report_time)
|
|
{
|
|
uint seconds_to_next= max(thd->variables.progress_report_time,
|
|
global_system_variables.progress_report_time);
|
|
if (seconds_to_next == 0) // Turned off
|
|
seconds_to_next= 1; // Check again after 1 second
|
|
|
|
thd->progress.next_report_time= (report_time +
|
|
seconds_to_next * 1000000000ULL);
|
|
if (global_system_variables.progress_report_time &&
|
|
thd->variables.progress_report_time)
|
|
net_send_progress_packet(thd);
|
|
}
|
|
}
|
|
|
|
|
|
/** Initialize progress report handling **/
|
|
|
|
extern "C" void thd_progress_init(MYSQL_THD thd, uint max_stage)
|
|
{
|
|
/*
|
|
    Send progress reports to clients that support it, if the command
|
|
is a high level command (like ALTER TABLE) and we are not in a
|
|
stored procedure
|
|
*/
|
|
thd->progress.report= ((thd->client_capabilities & CLIENT_PROGRESS) &&
|
|
thd->progress.report_to_client &&
|
|
!thd->in_sub_stmt);
|
|
thd->progress.next_report_time= 0;
|
|
thd->progress.stage= 0;
|
|
thd->progress.counter= thd->progress.max_counter= 0;
|
|
thd->progress.max_stage= max_stage;
|
|
}
|
|
|
|
|
|
/* Inform processlist and the client that some progress has been made */
|
|
|
|
extern "C" void thd_progress_report(MYSQL_THD thd,
|
|
ulonglong progress, ulonglong max_progress)
|
|
{
|
|
if (thd->progress.max_counter != max_progress) // Simple optimization
|
|
{
|
|
pthread_mutex_lock(&thd->LOCK_thd_data);
|
|
thd->progress.counter= progress;
|
|
thd->progress.max_counter= max_progress;
|
|
pthread_mutex_unlock(&thd->LOCK_thd_data);
|
|
}
|
|
else
|
|
thd->progress.counter= progress;
|
|
|
|
if (thd->progress.report)
|
|
thd_send_progress(thd);
|
|
}
|
|
|
|
/**
|
|
Move to next stage in process list handling
|
|
|
|
This will reset the timer to ensure the progress is sent to the client
|
|
if client progress reports are activated.
|
|
*/
|
|
|
|
extern "C" void thd_progress_next_stage(MYSQL_THD thd)
|
|
{
|
|
pthread_mutex_lock(&thd->LOCK_thd_data);
|
|
thd->progress.stage++;
|
|
thd->progress.counter= 0;
|
|
DBUG_ASSERT(thd->progress.stage < thd->progress.max_stage);
|
|
pthread_mutex_unlock(&thd->LOCK_thd_data);
|
|
if (thd->progress.report)
|
|
{
|
|
thd->progress.next_report_time= 0; // Send new stage info
|
|
thd_send_progress(thd);
|
|
}
|
|
}
|
|
|
|
/**
|
|
Disable reporting of progress in process list.
|
|
|
|
@note
|
|
This function is safe to call even if one has not called thd_progress_init.
|
|
|
|
  This function should be called by all code that does progress
  reporting, to ensure that the processlist doesn't show 100 % done
  forever.
|
|
*/
|
|
|
|
|
|
extern "C" void thd_progress_end(MYSQL_THD thd)
|
|
{
|
|
/*
|
|
    It's enough to reset max_counter to disable the progress indicator
    in the processlist.
|
|
*/
|
|
thd->progress.max_counter= 0;
|
|
}
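
/*
  Illustrative note (not part of the original sources): a typical caller, for
  example a long-running ALTER TABLE inside a storage engine, drives the
  progress API above roughly as follows:

    thd_progress_init(thd, 2);                   // two stages expected
    for (...)                                    // stage 1
      thd_progress_report(thd, rows_done, rows_total);
    thd_progress_next_stage(thd);
    for (...)                                    // stage 2
      thd_progress_report(thd, rows_done, rows_total);
    thd_progress_end(thd);                       // always end reporting

  thd_progress_end() must always be called so that the processlist does not
  keep showing a stale progress value.
*/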
|
|
|
|
|
|
/**
|
|
Return the thread id of a user thread
|
|
@param thd user thread
|
|
@return thread id
|
|
*/
|
|
extern "C" unsigned long thd_get_thread_id(const MYSQL_THD thd)
|
|
{
|
|
return((unsigned long)thd->thread_id);
|
|
}
|
|
|
|
|
|
#ifdef INNODB_COMPATIBILITY_HOOKS
|
|
extern "C" const struct charset_info_st *thd_charset(MYSQL_THD thd)
|
|
{
|
|
return(thd->charset());
|
|
}
|
|
|
|
/**
|
|
  OBSOLETE : there's no way to ensure the string is null terminated.
  Use thd_query_string() instead
|
|
*/
|
|
extern "C" char **thd_query(MYSQL_THD thd)
|
|
{
|
|
return(&thd->query_string.str);
|
|
}
|
|
|
|
/**
|
|
Get the current query string for the thread.
|
|
|
|
@param The MySQL internal thread pointer
|
|
@return query string and length. May be non-null-terminated.
|
|
*/
|
|
extern "C" LEX_STRING * thd_query_string (MYSQL_THD thd)
|
|
{
|
|
return(&thd->query_string);
|
|
}
|
|
|
|
extern "C" int thd_slave_thread(const MYSQL_THD thd)
|
|
{
|
|
return(thd->slave_thread);
|
|
}
|
|
|
|
extern "C" int thd_non_transactional_update(const MYSQL_THD thd)
|
|
{
|
|
return(thd->transaction.all.modified_non_trans_table);
|
|
}
|
|
|
|
extern "C" int thd_binlog_format(const MYSQL_THD thd)
|
|
{
|
|
if (mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG))
|
|
return (int) thd->variables.binlog_format;
|
|
else
|
|
return BINLOG_FORMAT_UNSPEC;
|
|
}
|
|
|
|
extern "C" void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all)
|
|
{
|
|
mark_transaction_to_rollback(thd, all);
|
|
}
|
|
|
|
extern "C" bool thd_binlog_filter_ok(const MYSQL_THD thd)
|
|
{
|
|
return binlog_filter->db_ok(thd->db);
|
|
}
|
|
#endif /* INNODB_COMPATIBILITY_HOOKS */
|
|
|
|
/****************************************************************************
  Handling of statement states in functions and triggers.

  This is used to ensure that the function/trigger gets a clean state
  to work with and does not cause any side effects on the calling statement.

  It also allows most stored functions and triggers to replicate even
  if they use items that would normally be stored in the binary
  log (like last_insert_id() etc...)

  The following things are done:
  - Disable binary logging for the duration of the statement
  - Disable multi-result-sets for the duration of the statement
  - Value of last_insert_id() is saved and restored
  - Value set by 'SET INSERT_ID=#' is reset and restored
  - Value for found_rows() is reset and restored
  - examined_row_count is added to the total
  - cuted_fields is added to the total
  - new savepoint level is created and destroyed

  NOTES:
    Seed for random() is saved for the first! usage of RAND()

    We reset examined_row_count and cuted_fields and add these to the
    result to ensure that if we have a bug that would reset these within
    a function, we are not losing any rows from the main statement.

    We do not reset value of last_insert_id().
****************************************************************************/
|
|
|
|
void THD::reset_sub_statement_state(Sub_statement_state *backup,
|
|
uint new_state)
|
|
{
|
|
#ifndef EMBEDDED_LIBRARY
|
|
/* BUG#33029, if we are replicating from a buggy master, reset
|
|
auto_inc_intervals_forced to prevent substatement
|
|
(triggers/functions) from using erroneous INSERT_ID value
|
|
*/
|
|
if (rpl_master_erroneous_autoinc(this))
|
|
{
|
|
DBUG_ASSERT(backup->auto_inc_intervals_forced.nb_elements() == 0);
|
|
auto_inc_intervals_forced.swap(&backup->auto_inc_intervals_forced);
|
|
}
|
|
#endif
|
|
|
|
backup->count_cuted_fields= count_cuted_fields;
|
|
backup->options= options;
|
|
backup->in_sub_stmt= in_sub_stmt;
|
|
backup->enable_slow_log= enable_slow_log;
|
|
backup->query_plan_flags= query_plan_flags;
|
|
backup->limit_found_rows= limit_found_rows;
|
|
backup->examined_row_count= examined_row_count;
|
|
backup->sent_row_count= sent_row_count;
|
|
backup->cuted_fields= cuted_fields;
|
|
backup->client_capabilities= client_capabilities;
|
|
backup->savepoints= transaction.savepoints;
|
|
backup->first_successful_insert_id_in_prev_stmt=
|
|
first_successful_insert_id_in_prev_stmt;
|
|
backup->first_successful_insert_id_in_cur_stmt=
|
|
first_successful_insert_id_in_cur_stmt;
|
|
|
|
if ((!lex->requires_prelocking() || is_update_query(lex->sql_command)) &&
|
|
!current_stmt_binlog_row_based)
|
|
{
|
|
options&= ~OPTION_BIN_LOG;
|
|
}
|
|
|
|
if ((backup->options & OPTION_BIN_LOG) && is_update_query(lex->sql_command)&&
|
|
!current_stmt_binlog_row_based)
|
|
mysql_bin_log.start_union_events(this, this->query_id);
|
|
|
|
/* Disable result sets */
|
|
client_capabilities &= ~CLIENT_MULTI_RESULTS;
|
|
in_sub_stmt|= new_state;
|
|
examined_row_count= 0;
|
|
sent_row_count= 0;
|
|
cuted_fields= 0;
|
|
transaction.savepoints= 0;
|
|
first_successful_insert_id_in_cur_stmt= 0;
|
|
}
|
|
|
|
|
|
void THD::restore_sub_statement_state(Sub_statement_state *backup)
|
|
{
|
|
DBUG_ENTER("THD::restore_sub_statement_state");
|
|
#ifndef EMBEDDED_LIBRARY
|
|
/* BUG#33029, if we are replicating from a buggy master, restore
|
|
auto_inc_intervals_forced so that the top statement can use the
|
|
INSERT_ID value set before this statement.
|
|
*/
|
|
if (rpl_master_erroneous_autoinc(this))
|
|
{
|
|
backup->auto_inc_intervals_forced.swap(&auto_inc_intervals_forced);
|
|
DBUG_ASSERT(backup->auto_inc_intervals_forced.nb_elements() == 0);
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
To save resources we want to release savepoints which were created
|
|
during execution of function or trigger before leaving their savepoint
|
|
level. It is enough to release first savepoint set on this level since
|
|
all later savepoints will be released automatically.
|
|
*/
|
|
if (transaction.savepoints)
|
|
{
|
|
SAVEPOINT *sv;
|
|
for (sv= transaction.savepoints; sv->prev; sv= sv->prev)
|
|
{}
|
|
/* ha_release_savepoint() never returns error. */
|
|
(void)ha_release_savepoint(this, sv);
|
|
}
|
|
count_cuted_fields= backup->count_cuted_fields;
|
|
transaction.savepoints= backup->savepoints;
|
|
options= backup->options;
|
|
in_sub_stmt= backup->in_sub_stmt;
|
|
enable_slow_log= backup->enable_slow_log;
|
|
query_plan_flags= backup->query_plan_flags;
|
|
first_successful_insert_id_in_prev_stmt=
|
|
backup->first_successful_insert_id_in_prev_stmt;
|
|
first_successful_insert_id_in_cur_stmt=
|
|
backup->first_successful_insert_id_in_cur_stmt;
|
|
limit_found_rows= backup->limit_found_rows;
|
|
sent_row_count= backup->sent_row_count;
|
|
client_capabilities= backup->client_capabilities;
|
|
/*
|
|
If we've left sub-statement mode, reset the fatal error flag.
|
|
Otherwise keep the current value, to propagate it up the sub-statement
|
|
stack.
|
|
*/
|
|
if (!in_sub_stmt)
|
|
is_fatal_sub_stmt_error= FALSE;
|
|
|
|
if ((options & OPTION_BIN_LOG) && is_update_query(lex->sql_command) &&
|
|
!current_stmt_binlog_row_based)
|
|
mysql_bin_log.stop_union_events(this);
|
|
|
|
/*
|
|
The following is added to the old values as we are interested in the
|
|
total complexity of the query
|
|
*/
|
|
examined_row_count+= backup->examined_row_count;
|
|
cuted_fields+= backup->cuted_fields;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void THD::set_statement(Statement *stmt)
|
|
{
|
|
pthread_mutex_lock(&LOCK_thd_data);
|
|
Statement::set_statement(stmt);
|
|
pthread_mutex_unlock(&LOCK_thd_data);
|
|
}
|
|
|
|
|
|
/** Assign a new value to thd->query. */
|
|
|
|
void THD::set_query(char *query_arg, uint32 query_length_arg)
|
|
{
|
|
pthread_mutex_lock(&LOCK_thd_data);
|
|
set_query_inner(query_arg, query_length_arg);
|
|
pthread_mutex_unlock(&LOCK_thd_data);
|
|
}
|
|
|
|
void THD::get_definer(LEX_USER *definer)
|
|
{
|
|
binlog_invoker();
|
|
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
|
|
if (slave_thread && has_invoker())
|
|
{
|
|
definer->user = invoker_user;
|
|
definer->host= invoker_host;
|
|
definer->password= null_lex_str;
|
|
definer->plugin= empty_lex_str;
|
|
definer->auth= empty_lex_str;
|
|
}
|
|
else
|
|
#endif
|
|
get_default_definer(this, definer);
|
|
}
|
|
|
|
|
|
/**
  Mark transaction to rollback and mark error as fatal to a sub-statement.

  @param  thd   Thread handle
  @param  all   TRUE <=> rollback main transaction.
*/

void mark_transaction_to_rollback(THD *thd, bool all)
{
  if (thd)
  {
    thd->is_fatal_sub_stmt_error= TRUE;
    thd->transaction_rollback_request= all;
    /*
      Aborted transactions can not be IGNOREd.
      Switch off the IGNORE flag for the current SELECT_LEX. This should
      allow my_error() to report the error and abort the execution flow,
      even in presence of an IGNORE clause.
    */
    if (thd->lex->current_select)
      thd->lex->current_select->no_error= FALSE;
  }
}

/***************************************************************************
  Handling of XA id caching
***************************************************************************/

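/*
  The xid_cache hash maps XIDs of XA transactions to their XID_STATE.
  All access to the hash is serialized by LOCK_xid_cache.
*/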
pthread_mutex_t LOCK_xid_cache;
HASH xid_cache;

extern "C" uchar *xid_get_hash_key(const uchar *, size_t *, my_bool);
extern "C" void xid_free_hash(void *);

uchar *xid_get_hash_key(const uchar *ptr, size_t *length,
                        my_bool not_used __attribute__((unused)))
{
  *length=((XID_STATE*)ptr)->xid.key_length();
  return ((XID_STATE*)ptr)->xid.key();
}

void xid_free_hash(void *ptr)
{
  if (!((XID_STATE*)ptr)->in_thd)
    my_free((uchar*)ptr, MYF(0));
}

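/*
  Create the XID cache hash and the mutex protecting it. Returns a non-zero
  value if the hash could not be initialized.
*/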
bool xid_cache_init()
{
  pthread_mutex_init(&LOCK_xid_cache, MY_MUTEX_INIT_FAST);
  return hash_init(&xid_cache, &my_charset_bin, 100, 0, 0,
                   xid_get_hash_key, xid_free_hash, 0) != 0;
}

void xid_cache_free()
{
  if (hash_inited(&xid_cache))
  {
    hash_free(&xid_cache);
    pthread_mutex_destroy(&LOCK_xid_cache);
  }
}

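/*
  Look up the XID_STATE registered for 'xid', or return NULL if there is
  none. The lookup is done under LOCK_xid_cache.
*/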
XID_STATE *xid_cache_search(XID *xid)
{
  pthread_mutex_lock(&LOCK_xid_cache);
  XID_STATE *res=(XID_STATE *)hash_search(&xid_cache, xid->key(), xid->key_length());
  pthread_mutex_unlock(&LOCK_xid_cache);
  return res;
}


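/*
  Register a new XID with the given XA state. The XID_STATE is allocated on
  the heap (in_thd == 0) so that xid_free_hash() will free it. Returns
  non-zero on out-of-memory; an already existing entry is not treated as an
  error here.
*/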
bool xid_cache_insert(XID *xid, enum xa_states xa_state)
{
  XID_STATE *xs;
  my_bool res;
  pthread_mutex_lock(&LOCK_xid_cache);
  if (hash_search(&xid_cache, xid->key(), xid->key_length()))
    res=0;
  else if (!(xs=(XID_STATE *)my_malloc(sizeof(*xs), MYF(MY_WME))))
    res=1;
  else
  {
    xs->xa_state=xa_state;
    xs->xid.set(xid);
    xs->in_thd=0;
    res=my_hash_insert(&xid_cache, (uchar*)xs);
  }
  pthread_mutex_unlock(&LOCK_xid_cache);
  return res;
}


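/*
  Insert an XID_STATE owned by a THD into the cache. If an entry with the
  same XID already exists, ER_XAER_DUPID is reported and TRUE is returned.
*/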
bool xid_cache_insert(XID_STATE *xid_state)
{
  pthread_mutex_lock(&LOCK_xid_cache);
  if (hash_search(&xid_cache, xid_state->xid.key(), xid_state->xid.key_length()))
  {
    pthread_mutex_unlock(&LOCK_xid_cache);
    my_error(ER_XAER_DUPID, MYF(0));
    return TRUE;
  }
  my_bool res= my_hash_insert(&xid_cache, (uchar*)xid_state);
  pthread_mutex_unlock(&LOCK_xid_cache);
  return res;
}


void xid_cache_delete(XID_STATE *xid_state)
{
  pthread_mutex_lock(&LOCK_xid_cache);
  hash_delete(&xid_cache, (uchar *)xid_state);
  pthread_mutex_unlock(&LOCK_xid_cache);
}


/*
  Implementation of interface to write rows to the binary log through the
  thread. The thread is responsible for writing the rows it has
  inserted/updated/deleted.
*/

#ifndef MYSQL_CLIENT

/*
  Template member function for ensuring that there is a rows log
  event of the appropriate type before proceeding.

  PRE CONDITION:
    - Events of type 'RowEventT' have the type code 'type_code'.

  POST CONDITION:
    If a non-NULL pointer is returned, the pending event for thread 'thd' will
    be an event of type 'RowEventT' (which has the type code 'type_code') and
    will either be empty or have enough space to hold 'needed' bytes. In
    addition, the columns bitmap will be correct for the row, meaning that
    the pending event will be flushed if the columns in the event differ from
    the columns supplied to the function.

  RETURNS
    If no error, a non-NULL pending event (either one which already existed or
    the newly created one).

    If error, NULL.
*/

template <class RowsEventT> Rows_log_event*
THD::binlog_prepare_pending_rows_event(TABLE* table, uint32 serv_id,
                                       MY_BITMAP const* cols,
                                       size_t colcnt,
                                       size_t needed,
                                       bool is_transactional,
                                       RowsEventT *hint __attribute__((unused)))
{
  DBUG_ENTER("binlog_prepare_pending_rows_event");
  /* Pre-conditions */
  DBUG_ASSERT(table->s->table_map_id != ~0UL);

  /* Fetch the type code for the RowsEventT template parameter */
  int const type_code= RowsEventT::TYPE_CODE;

  /*
    There is no good place to set up the transactional data, so we
    have to do it here.
  */
  if (binlog_setup_trx_data())
    DBUG_RETURN(NULL);

  Rows_log_event* pending= binlog_get_pending_rows_event();

  if (unlikely(pending && !pending->is_valid()))
    DBUG_RETURN(NULL);

  /*
    Check if the current event is non-NULL and a write-rows
    event. Also check if the table provided is mapped: if it is not,
    then we have switched to writing to a new table.
    If there is no pending event, we need to create one. If there is a pending
    event, but it's not about the same table id, or not of the same type
    (between Write, Update and Delete), or not the same affected columns, or
    going to be too big, flush this event to disk and create a new pending
    event.
  */
  if (!pending ||
      pending->server_id != serv_id ||
      pending->get_table_id() != table->s->table_map_id ||
      pending->get_type_code() != type_code ||
      pending->get_data_size() + needed > opt_binlog_rows_event_max_size ||
      pending->get_width() != colcnt ||
      !bitmap_cmp(pending->get_cols(), cols))
  {
    /* Create a new RowsEventT... */
    Rows_log_event* const
      ev= new RowsEventT(this, table, table->s->table_map_id, cols,
                         is_transactional);
    if (unlikely(!ev))
      DBUG_RETURN(NULL);
    ev->server_id= serv_id; // I don't like this, it's too easy to forget.
    /*
      flush the pending event and replace it with the newly created
      event...
    */
    if (unlikely(mysql_bin_log.flush_and_set_pending_rows_event(this, ev)))
    {
      delete ev;
      DBUG_RETURN(NULL);
    }

    DBUG_RETURN(ev);        /* This is the new pending event */
  }
  DBUG_RETURN(pending);     /* This is the current pending event */
}

#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
/*
  Instantiate the versions we need, we have -fno-implicit-template as
  compiling option.
*/
template Rows_log_event*
THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
                                        size_t, size_t, bool,
                                        Write_rows_log_event*);

template Rows_log_event*
THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
                                        size_t colcnt, size_t, bool,
                                        Delete_rows_log_event *);

template Rows_log_event*
THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
                                        size_t colcnt, size_t, bool,
                                        Update_rows_log_event *);
#endif


namespace {
  /**
     Class to handle temporary allocation of memory for row data.

     The responsibility of the class is to provide memory for
     packing one or two rows of packed data (depending on what
     constructor is called).

     In order to make the allocation more efficient for "simple" rows,
     i.e., rows that do not contain any blobs, a pointer to the
     allocated memory is stored in the table structure for simple
     rows. If memory for a table containing a blob field is requested,
     only memory for that is allocated, and subsequently released when
     the object is destroyed.
   */
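  /*
    Typical use, as in the THD::binlog_*_row() functions below:

      Row_data_memory memory(table, max_row_length(table, record));
      if (!memory.has_memory())
        return HA_ERR_OUT_OF_MEM;
      uchar *row_data= memory.slot(0);
  */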
  class Row_data_memory {
  public:
    /**
      Build an object to keep track of a block-local piece of memory
      for storing a row of data.

      @param table
      Table where the pre-allocated memory is stored.

      @param length
      Length of data that is needed, if the record contains blobs.
    */
    Row_data_memory(TABLE *table, size_t const len1)
      : m_memory(0)
    {
#ifndef DBUG_OFF
      m_alloc_checked= FALSE;
#endif
      allocate_memory(table, len1);
      m_ptr[0]= has_memory() ? m_memory : 0;
      m_ptr[1]= 0;
    }

    Row_data_memory(TABLE *table, size_t const len1, size_t const len2)
      : m_memory(0)
    {
#ifndef DBUG_OFF
      m_alloc_checked= FALSE;
#endif
      allocate_memory(table, len1 + len2);
      m_ptr[0]= has_memory() ? m_memory : 0;
      m_ptr[1]= has_memory() ? m_memory + len1 : 0;
    }

    ~Row_data_memory()
    {
      if (m_memory != 0 && m_release_memory_on_destruction)
        my_free((uchar*) m_memory, MYF(MY_WME));
    }

    /**
       Is there memory allocated?

       @retval true There is memory allocated
       @retval false Memory allocation failed
     */
    bool has_memory() const {
#ifndef DBUG_OFF
      m_alloc_checked= TRUE;
#endif
      return m_memory != 0;
    }

    uchar *slot(uint s)
    {
      DBUG_ASSERT(s < sizeof(m_ptr)/sizeof(*m_ptr));
      DBUG_ASSERT(m_ptr[s] != 0);
      DBUG_ASSERT(m_alloc_checked == TRUE);
      return m_ptr[s];
    }

  private:
    void allocate_memory(TABLE *const table, size_t const total_length)
    {
      if (table->s->blob_fields == 0)
      {
        /*
          The maximum length of a packed record is less than this
          length. We use this value instead of the supplied length
          when allocating memory for records, since we don't know how
          the memory will be used in future allocations.

          Since table->s->reclength is for unpacked records, we have
          to add two bytes for each field, which can potentially be
          added to hold the length of a packed field.
        */
        size_t const maxlen= table->s->reclength + 2 * table->s->fields;

        /*
          Allocate memory for two records if memory hasn't been
          allocated. We allocate memory for two records so that it can
          be used when processing update rows as well.
        */
        if (table->write_row_record == 0)
          table->write_row_record=
            (uchar *) alloc_root(&table->mem_root, 2 * maxlen);
        m_memory= table->write_row_record;
        m_release_memory_on_destruction= FALSE;
      }
      else
      {
        m_memory= (uchar *) my_malloc(total_length, MYF(MY_WME));
        m_release_memory_on_destruction= TRUE;
      }
    }

#ifndef DBUG_OFF
    mutable bool m_alloc_checked;
#endif
    bool m_release_memory_on_destruction;
    uchar *m_memory;
    uchar *m_ptr[2];
  };
}


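/*
  Pack 'record' and add it to the pending Write_rows event for this
  statement, creating a new pending event or flushing the old one first if
  necessary. Returns 0 on success or an error code such as
  HA_ERR_OUT_OF_MEM.
*/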
int THD::binlog_write_row(TABLE* table, bool is_trans,
                          MY_BITMAP const* cols, size_t colcnt,
                          uchar const *record)
{
  DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());

  /*
    Pack records into format for transfer. We are allocating more
    memory than needed, but that doesn't matter.
  */
  Row_data_memory memory(table, max_row_length(table, record));
  if (!memory.has_memory())
    return HA_ERR_OUT_OF_MEM;

  uchar *row_data= memory.slot(0);

  size_t const len= pack_row(table, cols, row_data, record);

  Rows_log_event* const ev=
    binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
                                      len, is_trans,
                                      static_cast<Write_rows_log_event*>(0));

  if (unlikely(ev == 0))
    return HA_ERR_OUT_OF_MEM;

  return ev->add_row_data(row_data, len);
}

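/*
  Pack the before and after images of an updated row and add both to the
  pending Update_rows event for this statement.
*/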
int THD::binlog_update_row(TABLE* table, bool is_trans,
                           MY_BITMAP const* cols, size_t colcnt,
                           const uchar *before_record,
                           const uchar *after_record)
{
  DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());

  size_t const before_maxlen = max_row_length(table, before_record);
  size_t const after_maxlen = max_row_length(table, after_record);

  Row_data_memory row_data(table, before_maxlen, after_maxlen);
  if (!row_data.has_memory())
    return HA_ERR_OUT_OF_MEM;

  uchar *before_row= row_data.slot(0);
  uchar *after_row= row_data.slot(1);

  size_t const before_size= pack_row(table, cols, before_row,
                                     before_record);
  size_t const after_size= pack_row(table, cols, after_row,
                                    after_record);

  /*
    Don't print debug messages when running valgrind since they can
    trigger false warnings.
  */
#ifndef HAVE_valgrind
  DBUG_DUMP("before_record", before_record, table->s->reclength);
  DBUG_DUMP("after_record", after_record, table->s->reclength);
  DBUG_DUMP("before_row", before_row, before_size);
  DBUG_DUMP("after_row", after_row, after_size);
#endif

  Rows_log_event* const ev=
    binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
                                      before_size + after_size, is_trans,
                                      static_cast<Update_rows_log_event*>(0));

  if (unlikely(ev == 0))
    return HA_ERR_OUT_OF_MEM;

  return
    ev->add_row_data(before_row, before_size) ||
    ev->add_row_data(after_row, after_size);
}

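/*
  Pack the deleted row and add it to the pending Delete_rows event for this
  statement.
*/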
int THD::binlog_delete_row(TABLE* table, bool is_trans,
                           MY_BITMAP const* cols, size_t colcnt,
                           uchar const *record)
{
  DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());

  /*
    Pack records into format for transfer. We are allocating more
    memory than needed, but that doesn't matter.
  */
  Row_data_memory memory(table, max_row_length(table, record));
  if (unlikely(!memory.has_memory()))
    return HA_ERR_OUT_OF_MEM;

  uchar *row_data= memory.slot(0);

  size_t const len= pack_row(table, cols, row_data, record);

  Rows_log_event* const ev=
    binlog_prepare_pending_rows_event(table, server_id, cols, colcnt,
                                      len, is_trans,
                                      static_cast<Delete_rows_log_event*>(0));

  if (unlikely(ev == 0))
    return HA_ERR_OUT_OF_MEM;

  return ev->add_row_data(row_data, len);
}


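/*
  Discard the pending rows event, if there is one, without writing it to the
  binary log. If 'clear_maps' is set, also forget which table maps have been
  written for the current statement. Always returns 0.
*/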
int THD::binlog_remove_pending_rows_event(bool clear_maps)
{
  DBUG_ENTER("THD::binlog_remove_pending_rows_event");

  if (!mysql_bin_log.is_open())
    DBUG_RETURN(0);

  mysql_bin_log.remove_pending_rows_event(this);

  if (clear_maps)
    binlog_table_maps= 0;

  DBUG_RETURN(0);
}

int THD::binlog_flush_pending_rows_event(bool stmt_end)
{
  DBUG_ENTER("THD::binlog_flush_pending_rows_event");
  /*
    We shall flush the pending event even if we are not in row-based
    mode: it might be the case that we left row-based mode before
    flushing anything (e.g., if we have explicitly locked tables).
  */
  if (!mysql_bin_log.is_open())
    DBUG_RETURN(0);

  /*
    Mark the event as the last event of a statement if the stmt_end
    flag is set.
  */
  int error= 0;
  if (Rows_log_event *pending= binlog_get_pending_rows_event())
  {
    if (stmt_end)
    {
      pending->set_flags(Rows_log_event::STMT_END_F);
      binlog_table_maps= 0;
    }

    error= mysql_bin_log.flush_and_set_pending_rows_event(this, 0);
  }

  DBUG_RETURN(error);
}


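/*
  Return a printable name for a binlog query type; only used for
  DBUG_PRINT output in debug builds.
*/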
#if !defined(DBUG_OFF) && !defined(_lint)
static const char *
show_query_type(THD::enum_binlog_query_type qtype)
{
  switch (qtype) {
  case THD::ROW_QUERY_TYPE:
    return "ROW";
  case THD::STMT_QUERY_TYPE:
    return "STMT";
  case THD::MYSQL_QUERY_TYPE:
    return "MYSQL";
  case THD::QUERY_TYPE_COUNT:
  default:
    DBUG_ASSERT(0 <= qtype && qtype < THD::QUERY_TYPE_COUNT);
  }
  static char buf[64];
  sprintf(buf, "UNKNOWN#%d", qtype);
  return buf;
}
#endif


/*
  Member function that will log the query, either row-based or
  statement-based, depending on the value of 'current_stmt_binlog_row_based'
  and the value of the 'qtype' flag.

  This function should be called after all calls to ha_*_row()
  functions have been issued, but before tables are unlocked and
  closed.

  OBSERVE
    There shall be no writes to any system table after calling
    binlog_query(), so such writes have to be moved to before the call
    of binlog_query() for correct functioning.

    This is necessary not only for RBR, but also because the master might
    crash after binlogging the query but before changing the system tables.
    This would leave the slave and the master in different states (after the
    master has restarted), so we have to eliminate this problem.

  RETURN VALUE
    Error code, or 0 if no error.
*/
int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
                      ulong query_len, bool is_trans, bool suppress_use,
                      int errcode)
{
  DBUG_ENTER("THD::binlog_query");
  DBUG_PRINT("enter", ("qtype: %s query: '%-.*s'",
                       show_query_type(qtype), (int) query_len, query_arg));
  DBUG_ASSERT(query_arg && mysql_bin_log.is_open());

  /*
    If we are not in prelocked mode, mysql_unlock_tables() will be
    called after this binlog_query(), so we have to flush the pending
    rows event with the STMT_END_F set to unlock all tables at the
    slave side as well.

    If we are in prelocked mode, the flushing will be done inside the
    top-most close_thread_tables().
  */
  if (this->prelocked_mode == NON_PRELOCKED)
    if (int error= binlog_flush_pending_rows_event(TRUE))
      DBUG_RETURN(error);

  /*
    If we are in statement mode and trying to log an unsafe statement,
    we should print a warning.
  */
  if (sql_log_bin_toplevel && lex->is_stmt_unsafe() &&
      variables.binlog_format == BINLOG_FORMAT_STMT &&
      binlog_filter->db_ok(this->db))
  {
    /*
      A warning can be elevated to an error in STRICT sql mode, but we
      don't want to elevate binlog warnings to errors here.
    */
    push_warning(this, MYSQL_ERROR::WARN_LEVEL_NOTE,
                 ER_BINLOG_UNSAFE_STATEMENT,
                 ER(ER_BINLOG_UNSAFE_STATEMENT));
    if (global_system_variables.log_warnings &&
        !(binlog_flags & BINLOG_FLAG_UNSAFE_STMT_PRINTED))
    {
      sql_print_warning("%s Statement: %.*s",
                        ER(ER_BINLOG_UNSAFE_STATEMENT),
                        (int) min(MYSQL_ERRMSG_SIZE, query_len), query_arg);
      binlog_flags|= BINLOG_FLAG_UNSAFE_STMT_PRINTED;
    }
  }

  switch (qtype) {
  case THD::ROW_QUERY_TYPE:
    DBUG_PRINT("debug",
               ("current_stmt_binlog_row_based: %d",
                current_stmt_binlog_row_based));
    if (current_stmt_binlog_row_based)
      DBUG_RETURN(0);
    /* Otherwise, we fall through */
  case THD::MYSQL_QUERY_TYPE:
    /*
      Using this query type is a convenience hack, since we have been
      moving back and forth between using RBR for replication of
      system tables and not using it.

      Make sure to change in check_table_binlog_row_based() according
      to how you treat this.
    */
  case THD::STMT_QUERY_TYPE:
    /*
      The MYSQL_LOG::write() function will set the STMT_END_F flag and
      flush the pending rows event if necessary.
    */
    {
      Query_log_event qinfo(this, query_arg, query_len, is_trans, suppress_use,
                            errcode);
      /*
        Binlog table maps will be irrelevant after a Query_log_event
        (they are just removed on the slave side) so after the query
        log event is written to the binary log, we pretend that no
        table maps were written.
      */
      int error= mysql_bin_log.write(&qinfo);
      binlog_table_maps= 0;
      DBUG_RETURN(error);
    }

  case THD::QUERY_TYPE_COUNT:
  default:
    DBUG_ASSERT(qtype < QUERY_TYPE_COUNT);
  }
  DBUG_RETURN(0);
}

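/*
  wait_for_wakeup_ready() blocks until another thread has called
  signal_wakeup_ready(). The wakeup_ready flag and the condition variable
  are protected by LOCK_wakeup_ready.
*/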
void
THD::wait_for_wakeup_ready()
{
  pthread_mutex_lock(&LOCK_wakeup_ready);
  while (!wakeup_ready)
    pthread_cond_wait(&COND_wakeup_ready, &LOCK_wakeup_ready);
  pthread_mutex_unlock(&LOCK_wakeup_ready);
}

void
THD::signal_wakeup_ready()
{
  pthread_mutex_lock(&LOCK_wakeup_ready);
  wakeup_ready= true;
  pthread_mutex_unlock(&LOCK_wakeup_ready);
  pthread_cond_signal(&COND_wakeup_ready);
}


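/*
  Append the interval starting at 'start' to the list of auto_increment
  intervals. If the new interval is contiguous with the last one the two are
  merged; otherwise a new Discrete_interval is allocated. Returns non-zero
  if allocation of the new interval fails.
*/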
bool Discrete_intervals_list::append(ulonglong start, ulonglong val,
                                     ulonglong incr)
{
  DBUG_ENTER("Discrete_intervals_list::append");
  /* first, see if this can be merged with previous */
  if ((head == NULL) || tail->merge_if_contiguous(start, val, incr))
  {
    /* it cannot, so need to add a new interval */
    Discrete_interval *new_interval= new Discrete_interval(start, val, incr);
    DBUG_RETURN(append(new_interval));
  }
  DBUG_RETURN(0);
}

bool Discrete_intervals_list::append(Discrete_interval *new_interval)
{
  DBUG_ENTER("Discrete_intervals_list::append");
  if (unlikely(new_interval == NULL))
    DBUG_RETURN(1);
  DBUG_PRINT("info",("adding new auto_increment interval"));
  if (head == NULL)
    head= current= new_interval;
  else
    tail->next= new_interval;
  tail= new_interval;
  elements++;
  DBUG_RETURN(0);
}

#endif /* !defined(MYSQL_CLIENT) */