/*
   Copyright (c) 2000, 2013, Oracle and/or its affiliates.
   Copyright (c) 2008, 2013, Monty Program Ab.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/


/*****************************************************************************
**
** This file implements classes defined in sql_class.h
** Especially the classes to handle a result from a select
**
*****************************************************************************/

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation                          // gcc: Class implementation
#endif

#include "my_global.h"                          /* NO_EMBEDDED_ACCESS_CHECKS */
#include "sql_priv.h"
#include "unireg.h"                    // REQUIRED: for other includes
#include "sql_class.h"
#include "sql_cache.h"                 // query_cache_abort
#include "sql_base.h"                  // close_thread_tables
#include "sql_time.h"                  // date_time_format_copy
#include "tztime.h"                    // MYSQL_TIME <-> my_time_t
#include "sql_acl.h"                   // NO_ACCESS,
                                       // acl_getroot_no_password
#include "sql_base.h"                  // close_temporary_tables
#include "sql_handler.h"               // mysql_ha_cleanup
#include "rpl_rli.h"
#include "rpl_filter.h"
#include "rpl_record.h"
#include "slave.h"
#include <my_bitmap.h>
#include "log_event.h"
#include "sql_audit.h"
#include <m_ctype.h>
#include <sys/stat.h>
#include <thr_alarm.h>
#ifdef __WIN__
#include <io.h>
#endif
#include <mysys_err.h>
#include <limits.h>

#include "sp_rcontext.h"
#include "sp_cache.h"
#include "transaction.h"
#include "sql_select.h"                /* declares create_tmp_table() */
#include "debug_sync.h"
#include "sql_parse.h"                 // is_update_query
#include "sql_callback.h"
#include "lock.h"
#ifdef WITH_WSREP
#include "wsrep_mysqld.h"
#include "wsrep_thd.h"
#endif
#include "sql_connect.h"

/*
  The following is used to initialise Table_ident with an internal
  table name
*/
char internal_table_name[2]= "*";
char empty_c_string[1]= {0};    /* used for not defined db */

const char * const THD::DEFAULT_WHERE= "field list";

/****************************************************************************
** User variables
****************************************************************************/

extern "C" uchar *get_var_key(user_var_entry *entry, size_t *length,
                              my_bool not_used __attribute__((unused)))
{
  *length= entry->name.length;
  return (uchar*) entry->name.str;
}

extern "C" void free_user_var(user_var_entry *entry)
{
  char *pos= (char*) entry+ALIGN_SIZE(sizeof(*entry));
  if (entry->value && entry->value != pos)
    my_free(entry->value);
  my_free(entry);
}
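
/*
  Illustration only (not part of the build): the two callbacks above are the
  key-extraction and cleanup hooks that the THD constructor later in this
  file passes to my_hash_init() for the per-connection user-variable hash,
  roughly:

    my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
                 (my_hash_get_key) get_var_key,
                 (my_hash_free_key) free_user_var, HASH_THREAD_SPECIFIC);

  so every "@var" a client sets is looked up by name via get_var_key() and
  released via free_user_var() when the hash is destroyed.
*/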

bool Key_part_spec::operator==(const Key_part_spec& other) const
{
  return length == other.length &&
         !my_strcasecmp(system_charset_info, field_name.str,
                        other.field_name.str);
}

/**
  Construct an (almost) deep copy of this key. Only those
  elements that are known to never change are not copied.
  If out of memory, a partial copy is returned and an error is set
  in THD.
*/

Key::Key(const Key &rhs, MEM_ROOT *mem_root)
  :type(rhs.type),
  key_create_info(rhs.key_create_info),
  columns(rhs.columns, mem_root),
  name(rhs.name),
  option_list(rhs.option_list),
  generated(rhs.generated),
  create_if_not_exists(rhs.create_if_not_exists)
{
  list_copy_and_replace_each_value(columns, mem_root);
}

/**
  Construct an (almost) deep copy of this foreign key. Only those
  elements that are known to never change are not copied.
  If out of memory, a partial copy is returned and an error is set
  in THD.
*/

Foreign_key::Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root)
  :Key(rhs,mem_root),
  ref_db(rhs.ref_db),
  ref_table(rhs.ref_table),
  ref_columns(rhs.ref_columns,mem_root),
  delete_opt(rhs.delete_opt),
  update_opt(rhs.update_opt),
  match_opt(rhs.match_opt)
{
  list_copy_and_replace_each_value(ref_columns, mem_root);
}

/*
  Test if a foreign key (= generated key) is a prefix of the given key
  (ignoring key name, key type and order of columns)

  NOTES:
    This is only used to test if an index for a FOREIGN KEY exists

  IMPLEMENTATION
    We only compare field names

  RETURN
    0   Generated key is a prefix of other key
    1   Not equal
*/

bool foreign_key_prefix(Key *a, Key *b)
{
  /* Ensure that 'a' is the generated key */
  if (a->generated)
  {
    if (b->generated && a->columns.elements > b->columns.elements)
      swap_variables(Key*, a, b);               // Put shorter key in 'a'
  }
  else
  {
    if (!b->generated)
      return TRUE;                              // No foreign key
    swap_variables(Key*, a, b);                 // Put generated key in 'a'
  }

  /* Test if 'a' is a prefix of 'b' */
  if (a->columns.elements > b->columns.elements)
    return TRUE;                                // Can't be prefix

  List_iterator<Key_part_spec> col_it1(a->columns);
  List_iterator<Key_part_spec> col_it2(b->columns);
  const Key_part_spec *col1, *col2;

#ifdef ENABLE_WHEN_INNODB_CAN_HANDLE_SWAPED_FOREIGN_KEY_COLUMNS
  while ((col1= col_it1++))
  {
    bool found= 0;
    col_it2.rewind();
    while ((col2= col_it2++))
    {
      if (*col1 == *col2)
      {
        found= TRUE;
        break;
      }
    }
    if (!found)
      return TRUE;                              // Error
  }
  return FALSE;                                 // Is prefix
#else
  while ((col1= col_it1++))
  {
    col2= col_it2++;
    if (!(*col1 == *col2))
      return TRUE;
  }
  return FALSE;                                 // Is prefix
#endif
}
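
/*
  Illustrative example only (not compiled): for a table declared as

    CREATE TABLE t (a INT, b INT, c INT,
                    KEY k (a, b, c),
                    FOREIGN KEY (a, b) REFERENCES p (x, y));

  the generated key for the FOREIGN KEY has columns (a, b), which is a
  leading prefix of k(a, b, c), so foreign_key_prefix() returns FALSE and no
  extra index needs to be created to support the foreign key.
*/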

/*
  @brief
  Check if the foreign key options are compatible with the specification
  of the columns on which the key is created

  @retval
    FALSE   The foreign key options are compatible with key columns
  @retval
    TRUE    Otherwise
*/
bool Foreign_key::validate(List<Create_field> &table_fields)
{
  Create_field  *sql_field;
  Key_part_spec *column;
  List_iterator<Key_part_spec> cols(columns);
  List_iterator<Create_field> it(table_fields);
  DBUG_ENTER("Foreign_key::validate");
  while ((column= cols++))
  {
    it.rewind();
    while ((sql_field= it++) &&
           my_strcasecmp(system_charset_info,
                         column->field_name.str,
                         sql_field->field_name)) {}
    if (!sql_field)
    {
      my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name);
      DBUG_RETURN(TRUE);
    }
    if (type == Key::FOREIGN_KEY && sql_field->vcol_info)
    {
      if (delete_opt == FK_OPTION_SET_NULL)
      {
        my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
                 "ON DELETE SET NULL");
        DBUG_RETURN(TRUE);
      }
      if (update_opt == FK_OPTION_SET_NULL)
      {
        my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
                 "ON UPDATE SET NULL");
        DBUG_RETURN(TRUE);
      }
      if (update_opt == FK_OPTION_CASCADE)
      {
        my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
                 "ON UPDATE CASCADE");
        DBUG_RETURN(TRUE);
      }
    }
  }
  DBUG_RETURN(FALSE);
}

/****************************************************************************
** Thread specific functions
****************************************************************************/
#ifdef ONLY_FOR_MYSQL_CLOSED_SOURCE_SCHEDULED
/**
  Get reference to scheduler data object

  @param thd            THD object

  @retval               Scheduler data object on THD
*/
void *thd_get_scheduler_data(THD *thd)
{
  return thd->scheduler.data;
}

/**
  Set reference to Scheduler data object for THD object

  @param thd            THD object
  @param psi            Scheduler data object to set on THD
*/
void thd_set_scheduler_data(THD *thd, void *data)
{
  thd->scheduler.data= data;
}

/**
  Get reference to Performance Schema object for THD object

  @param thd            THD object

  @retval               Performance schema object for thread on THD
*/
PSI_thread *thd_get_psi(THD *thd)
{
  return thd->scheduler.m_psi;
}

/**
  Get net_wait_timeout for THD object

  @param thd            THD object

  @retval               net_wait_timeout value for thread on THD
*/
ulong thd_get_net_wait_timeout(THD* thd)
{
  return thd->variables.net_wait_timeout;
}

/**
  Set reference to Performance Schema object for THD object

  @param thd            THD object
  @param psi            Performance schema object for thread
*/
void thd_set_psi(THD *thd, PSI_thread *psi)
{
  thd->scheduler.m_psi= psi;
}

/**
  Set the state on connection to killed

  @param thd            THD object
*/
void thd_set_killed(THD *thd)
{
  thd->killed= KILL_CONNECTION;
}

/**
  Clear errors from the previous THD

  @param thd            THD object
*/
void thd_clear_errors(THD *thd)
{
  my_errno= 0;
  thd->mysys_var->abort= 0;
}

/**
  Set thread stack in THD object

  @param thd            Thread object
  @param stack_start    Start of stack to set in THD object
*/
void thd_set_thread_stack(THD *thd, char *stack_start)
{
  thd->thread_stack= stack_start;
}

/**
  Close the socket used by this connection

  @param thd            THD object
*/
void thd_close_connection(THD *thd)
{
  if (thd->net.vio)
    vio_close(thd->net.vio);
}

/**
  Get current THD object from thread local data

  @retval     The THD object for the thread, NULL if not connection thread
*/
THD *thd_get_current_thd()
{
  return current_thd;
}

/**
  Lock data that needs protection in THD object

  @param thd            THD object
*/
void thd_lock_data(THD *thd)
{
  mysql_mutex_lock(&thd->LOCK_thd_data);
}

/**
  Unlock data that needs protection in THD object

  @param thd            THD object
*/
void thd_unlock_data(THD *thd)
{
  mysql_mutex_unlock(&thd->LOCK_thd_data);
}

/**
  Support method to check if connection has already started a transaction

  @param client_cntx    Low level client context

  @retval               TRUE if connection already started transaction
*/
bool thd_is_transaction_active(THD *thd)
{
  return thd->transaction.is_active();
}

/**
  Check if there is buffered data on the socket representing the connection

  @param thd            THD object
*/
int thd_connection_has_data(THD *thd)
{
  Vio *vio= thd->net.vio;
  return vio->has_data(vio);
}

/**
  Set reading/writing on socket, used by SHOW PROCESSLIST

  @param thd            THD object
  @param val            Value to set it to (0 or 1)
*/
void thd_set_net_read_write(THD *thd, uint val)
{
  thd->net.reading_or_writing= val;
}

/**
  Get reading/writing on socket from THD object
  @param thd            THD object

  @retval               net.reading_or_writing value for thread on THD.
*/
uint thd_get_net_read_write(THD *thd)
{
  return thd->net.reading_or_writing;
}

/**
  Set reference to mysys variable in THD object

  @param thd            THD object
  @param mysys_var      Reference to set
*/
void thd_set_mysys_var(THD *thd, st_my_thread_var *mysys_var)
{
  thd->set_mysys_var(mysys_var);
}

/**
  Get socket file descriptor for this connection

  @param thd            THD object

  @retval               Socket of the connection
*/
my_socket thd_get_fd(THD *thd)
{
  return mysql_socket_getfd(thd->net.vio->mysql_socket);
}
#endif

/**
  Get thread attributes for connection threads

  @retval      Reference to thread attribute for connection threads
*/
pthread_attr_t *get_connection_attrib(void)
{
  return &connection_attrib;
}

/**
  Get max number of connections

  @retval      Max number of connections for MySQL Server
*/
ulong get_max_connections(void)
{
  return max_connections;
}

/*
  The following functions form part of the C plugin API
*/

extern "C" int mysql_tmpfile(const char *prefix)
{
  char filename[FN_REFLEN];
  File fd = create_temp_file(filename, mysql_tmpdir, prefix,
#ifdef __WIN__
                             O_BINARY | O_TRUNC | O_SEQUENTIAL |
                             O_SHORT_LIVED |
#endif /* __WIN__ */
                             O_CREAT | O_EXCL | O_RDWR | O_TEMPORARY,
                             MYF(MY_WME));
  if (fd >= 0) {
#ifndef __WIN__
    /*
      This can be removed once the following bug is fixed:
      Bug #28903  create_temp_file() doesn't honor O_TEMPORARY option
                  (file not removed) (Unix)
    */
    unlink(filename);
#endif /* !__WIN__ */
  }

  return fd;
}
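
/*
  Usage sketch (illustrative only, not part of this file): a plugin or
  storage engine that needs an anonymous spill file could do

    File fd= mysql_tmpfile("myprefix");
    if (fd >= 0)
    {
      // ... read/write through mysys, e.g. my_write()/my_read() ...
      my_close(fd, MYF(0));
    }

  The file is created under the server tmpdir and is already unlinked (or
  opened with O_TEMPORARY on Windows), so it disappears as soon as the
  descriptor is closed.
*/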


extern "C"
int thd_in_lock_tables(const THD *thd)
{
  return MY_TEST(thd->in_lock_tables);
}


extern "C"
int thd_tablespace_op(const THD *thd)
{
  return MY_TEST(thd->tablespace_op);
}

extern "C"
const char *set_thd_proc_info(THD *thd_arg, const char *info,
                              const char *calling_function,
                              const char *calling_file,
                              const unsigned int calling_line)
{
  PSI_stage_info old_stage;
  PSI_stage_info new_stage;

  old_stage.m_key= 0;
  old_stage.m_name= info;

  set_thd_stage_info(thd_arg, & old_stage, & new_stage,
                     calling_function, calling_file, calling_line);

  return new_stage.m_name;
}

extern "C"
void set_thd_stage_info(void *thd_arg,
                        const PSI_stage_info *new_stage,
                        PSI_stage_info *old_stage,
                        const char *calling_func,
                        const char *calling_file,
                        const unsigned int calling_line)
{
  THD *thd= (THD*) thd_arg;
  if (thd == NULL)
    thd= current_thd;

  thd->enter_stage(new_stage, old_stage, calling_func, calling_file,
                   calling_line);
}

void THD::enter_stage(const PSI_stage_info *new_stage,
                      PSI_stage_info *old_stage,
                      const char *calling_func,
                      const char *calling_file,
                      const unsigned int calling_line)
{
  DBUG_PRINT("THD::enter_stage", ("%s:%d", calling_file, calling_line));

  if (old_stage != NULL)
  {
    old_stage->m_key= m_current_stage_key;
    old_stage->m_name= proc_info;
  }

  if (new_stage != NULL)
  {
    const char *msg= new_stage->m_name;

#if defined(ENABLED_PROFILING)
    profiling.status_change(msg, calling_func, calling_file, calling_line);
#endif

    m_current_stage_key= new_stage->m_key;
    proc_info= msg;

#ifdef HAVE_PSI_THREAD_INTERFACE
    PSI_THREAD_CALL(set_thread_state)(msg);
    MYSQL_SET_STAGE(m_current_stage_key, calling_file, calling_line);
#endif
  }
  return;
}

void thd_enter_cond(MYSQL_THD thd, mysql_cond_t *cond, mysql_mutex_t *mutex,
                    const PSI_stage_info *stage, PSI_stage_info *old_stage,
                    const char *src_function, const char *src_file,
                    int src_line)
{
  if (!thd)
    thd= current_thd;

  return thd->enter_cond(cond, mutex, stage, old_stage, src_function, src_file,
                         src_line);
}

void thd_exit_cond(MYSQL_THD thd, const PSI_stage_info *stage,
                   const char *src_function, const char *src_file,
                   int src_line)
{
  if (!thd)
    thd= current_thd;

  thd->exit_cond(stage, src_function, src_file, src_line);
  return;
}

extern "C"
void **thd_ha_data(const THD *thd, const struct handlerton *hton)
{
  return (void **) &thd->ha_data[hton->slot].ha_ptr;
}

extern "C"
void thd_storage_lock_wait(THD *thd, long long value)
{
  thd->utime_after_lock+= value;
}

/**
  Provide a handler data getter to simplify coding
*/
extern "C"
void *thd_get_ha_data(const THD *thd, const struct handlerton *hton)
{
  return *thd_ha_data(thd, hton);
}


/**
  Provide a handler data setter to simplify coding
  @see thd_set_ha_data() definition in plugin.h
*/
extern "C"
void thd_set_ha_data(THD *thd, const struct handlerton *hton,
                     const void *ha_data)
{
  plugin_ref *lock= &thd->ha_data[hton->slot].lock;
  if (ha_data && !*lock)
    *lock= ha_lock_engine(NULL, (handlerton*) hton);
  else if (!ha_data && *lock)
  {
    plugin_unlock(NULL, *lock);
    *lock= NULL;
  }
  *thd_ha_data(thd, hton)= (void*) ha_data;
}
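
/*
  Usage sketch (illustrative only): a storage engine typically keeps its
  per-connection state in this ha_data slot, along the lines of

    static void *get_trx(THD *thd, handlerton *hton)
    {
      void *trx= thd_get_ha_data(thd, hton);
      if (!trx)
      {
        trx= my_malloc(sizeof(my_trx_t), MYF(MY_WME));  // hypothetical type
        thd_set_ha_data(thd, hton, trx);                // also pins the plugin
      }
      return trx;
    }

  Passing NULL as ha_data later releases the plugin reference that the
  non-NULL call took, as implemented above.
*/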


/**
  Allow storage engine to wakeup commits waiting in THD::wait_for_prior_commit.
  @see thd_wakeup_subsequent_commits() definition in plugin.h
*/
extern "C"
void thd_wakeup_subsequent_commits(THD *thd, int wakeup_error)
{
  thd->wakeup_subsequent_commits(wakeup_error);
}


extern "C"
long long thd_test_options(const THD *thd, long long test_options)
{
  return thd->variables.option_bits & test_options;
}

extern "C"
int thd_sql_command(const THD *thd)
{
  return (int) thd->lex->sql_command;
}

extern "C"
int thd_tx_isolation(const THD *thd)
{
  return (int) thd->tx_isolation;
}

extern "C"
int thd_tx_is_read_only(const THD *thd)
{
  return (int) thd->tx_read_only;
}


extern "C"
{ /* Functions for thd_error_context_service */

  const char *thd_get_error_message(const THD *thd)
  {
    return thd->get_stmt_da()->message();
  }

  uint thd_get_error_number(const THD *thd)
  {
    return thd->get_stmt_da()->sql_errno();
  }

  ulong thd_get_error_row(const THD *thd)
  {
    return thd->get_stmt_da()->current_row_for_warning();
  }

  void thd_inc_error_row(THD *thd)
  {
    thd->get_stmt_da()->inc_current_row_for_warning();
  }
}


/**
  Dumps a text description of a thread, its security context
  (user, host) and the current query.

  @param thd            thread context
  @param buffer         pointer to preferred result buffer
  @param length         length of buffer
  @param max_query_len  how many chars of query to copy (0 for all)

  @req LOCK_thread_count

  @note LOCK_thread_count mutex is not necessary when the function is invoked
  on the currently running thread (current_thd) or if the caller in some other
  way guarantees that access to thd->query is serialized.

  @return Pointer to string
*/

extern "C"
char *thd_get_error_context_description(THD *thd, char *buffer,
                                        unsigned int length,
                                        unsigned int max_query_len)
{
  String str(buffer, length, &my_charset_latin1);
  const Security_context *sctx= &thd->main_security_ctx;
  char header[256];
  int len;
  /*
    The pointers thd->query and thd->proc_info might change since they are
    being modified concurrently. This is acceptable for proc_info since its
    value doesn't have to be very accurate and the memory it points to is
    static, but we need to attempt a snapshot on the pointer values to avoid
    using NULL values. The pointer to thd->query however, doesn't point to
    static memory and has to be protected by thd->LOCK_thd_data or risk
    pointing to uninitialized memory.
  */
  const char *proc_info= thd->proc_info;

  len= my_snprintf(header, sizeof(header),
                   "MySQL thread id %lu, OS thread handle 0x%lx, query id %lu",
                   thd->thread_id, (ulong) thd->real_id, (ulong) thd->query_id);
  str.length(0);
  str.append(header, len);

  if (sctx->host)
  {
    str.append(' ');
    str.append(sctx->host);
  }

  if (sctx->ip)
  {
    str.append(' ');
    str.append(sctx->ip);
  }

  if (sctx->user)
  {
    str.append(' ');
    str.append(sctx->user);
  }

  if (proc_info)
  {
    str.append(' ');
    str.append(proc_info);
  }

  /* Don't wait if LOCK_thd_data is used as this could cause a deadlock */
  if (!mysql_mutex_trylock(&thd->LOCK_thd_data))
  {
    if (thd->query())
    {
      if (max_query_len < 1)
        len= thd->query_length();
      else
        len= MY_MIN(thd->query_length(), max_query_len);
      str.append('\n');
      str.append(thd->query(), len);
    }
    mysql_mutex_unlock(&thd->LOCK_thd_data);
  }

  if (str.c_ptr_safe() == buffer)
    return buffer;

  /*
    We have to copy the new string to the destination buffer because the string
    was reallocated to a larger buffer to be able to fit.
  */
  DBUG_ASSERT(buffer != NULL);
  length= MY_MIN(str.length(), length-1);
  memcpy(buffer, str.c_ptr_quick(), length);
  /* Make sure that the new string is null terminated */
  buffer[length]= '\0';
  return buffer;
}

#ifdef WITH_WSREP
extern int wsrep_on(void *thd)
{
  return (int)(WSREP(((THD*)thd)));
}
extern "C" bool wsrep_thd_is_wsrep_on(THD *thd)
{
  return thd->variables.wsrep_on;
}

extern "C" bool wsrep_consistency_check(void *thd)
{
  return ((THD*)thd)->wsrep_consistency_check == CONSISTENCY_CHECK_RUNNING;
}

extern "C" void wsrep_thd_set_exec_mode(THD *thd, enum wsrep_exec_mode mode)
{
  thd->wsrep_exec_mode= mode;
}
extern "C" void wsrep_thd_set_query_state(
        THD *thd, enum wsrep_query_state state)
{
  thd->wsrep_query_state= state;
}
extern "C" void wsrep_thd_set_conflict_state(
        THD *thd, enum wsrep_conflict_state state)
{
  thd->wsrep_conflict_state= state;
}


extern "C" enum wsrep_exec_mode wsrep_thd_exec_mode(THD *thd)
{
  return thd->wsrep_exec_mode;
}

extern "C" const char *wsrep_thd_exec_mode_str(THD *thd)
{
  return
    (!thd) ? "void" :
    (thd->wsrep_exec_mode == LOCAL_STATE)  ? "local" :
    (thd->wsrep_exec_mode == REPL_RECV)    ? "applier" :
    (thd->wsrep_exec_mode == TOTAL_ORDER)  ? "total order" :
    (thd->wsrep_exec_mode == LOCAL_COMMIT) ? "local commit" : "void";
}

extern "C" enum wsrep_query_state wsrep_thd_query_state(THD *thd)
{
  return thd->wsrep_query_state;
}

extern "C" const char *wsrep_thd_query_state_str(THD *thd)
{
  return
    (!thd) ? "void" :
    (thd->wsrep_query_state == QUERY_IDLE)        ? "idle" :
    (thd->wsrep_query_state == QUERY_EXEC)        ? "executing" :
    (thd->wsrep_query_state == QUERY_COMMITTING)  ? "committing" :
    (thd->wsrep_query_state == QUERY_EXITING)     ? "exiting" :
    (thd->wsrep_query_state == QUERY_ROLLINGBACK) ? "rolling back" : "void";
}

extern "C" enum wsrep_conflict_state wsrep_thd_conflict_state(THD *thd)
{
  return thd->wsrep_conflict_state;
}
extern "C" const char *wsrep_thd_conflict_state_str(THD *thd)
{
  return
    (!thd) ? "void" :
    (thd->wsrep_conflict_state == NO_CONFLICT)      ? "no conflict" :
    (thd->wsrep_conflict_state == MUST_ABORT)       ? "must abort" :
    (thd->wsrep_conflict_state == ABORTING)         ? "aborting" :
    (thd->wsrep_conflict_state == MUST_REPLAY)      ? "must replay" :
    (thd->wsrep_conflict_state == REPLAYING)        ? "replaying" :
    (thd->wsrep_conflict_state == RETRY_AUTOCOMMIT) ? "retrying" :
    (thd->wsrep_conflict_state == CERT_FAILURE)     ? "cert failure" : "void";
}

extern "C" wsrep_ws_handle_t* wsrep_thd_ws_handle(THD *thd)
{
  return &thd->wsrep_ws_handle;
}

extern "C" void wsrep_thd_LOCK(THD *thd)
{
  mysql_mutex_lock(&thd->LOCK_wsrep_thd);
}
extern "C" void wsrep_thd_UNLOCK(THD *thd)
{
  mysql_mutex_unlock(&thd->LOCK_wsrep_thd);
}
extern "C" time_t wsrep_thd_query_start(THD *thd)
{
  return thd->query_start();
}
extern "C" uint32 wsrep_thd_wsrep_rand(THD *thd)
{
  return thd->wsrep_rand;
}
extern "C" my_thread_id wsrep_thd_thread_id(THD *thd)
{
  return thd->thread_id;
}
extern "C" wsrep_seqno_t wsrep_thd_trx_seqno(THD *thd)
{
  return (thd) ? thd->wsrep_trx_meta.gtid.seqno : WSREP_SEQNO_UNDEFINED;
}
extern "C" query_id_t wsrep_thd_query_id(THD *thd)
{
  return thd->query_id;
}
extern "C" char *wsrep_thd_query(THD *thd)
{
  return (thd) ? thd->query() : NULL;
}
extern "C" query_id_t wsrep_thd_wsrep_last_query_id(THD *thd)
{
  return thd->wsrep_last_query_id;
}
extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id)
{
  thd->wsrep_last_query_id= id;
}
extern "C" void wsrep_thd_awake(THD *thd, my_bool signal)
{
  if (signal)
  {
    mysql_mutex_lock(&thd->LOCK_thd_data);
    thd->awake(KILL_QUERY);
    mysql_mutex_unlock(&thd->LOCK_thd_data);
  }
  else
  {
    mysql_mutex_lock(&LOCK_wsrep_replaying);
    mysql_cond_broadcast(&COND_wsrep_replaying);
    mysql_mutex_unlock(&LOCK_wsrep_replaying);
  }
}
extern "C" int wsrep_thd_retry_counter(THD *thd)
{
  return(thd->wsrep_retry_counter);
}

extern int
wsrep_trx_order_before(void *thd1, void *thd2)
{
  if (wsrep_thd_trx_seqno((THD*)thd1) < wsrep_thd_trx_seqno((THD*)thd2)) {
    WSREP_DEBUG("BF conflict, order: %lld %lld\n",
                (long long)wsrep_thd_trx_seqno((THD*)thd1),
                (long long)wsrep_thd_trx_seqno((THD*)thd2));
    return 1;
  }
  WSREP_DEBUG("waiting for BF, trx order: %lld %lld\n",
              (long long)wsrep_thd_trx_seqno((THD*)thd1),
              (long long)wsrep_thd_trx_seqno((THD*)thd2));
  return 0;
}
extern "C" int
wsrep_trx_is_aborting(void *thd_ptr)
{
  if (thd_ptr) {
    if ((((THD *)thd_ptr)->wsrep_conflict_state == MUST_ABORT) ||
        (((THD *)thd_ptr)->wsrep_conflict_state == ABORTING)) {
      return 1;
    }
  }
  return 0;
}
#endif

#if MARIA_PLUGIN_INTERFACE_VERSION < 0x0200
/**
  TODO: This function is for API compatibility, remove it eventually.
  All engines should switch to use thd_get_error_context_description()
  plugin service function.
*/
extern "C"
char *thd_security_context(THD *thd,
                           char *buffer, unsigned int length,
                           unsigned int max_query_len)
{
  return thd_get_error_context_description(thd, buffer, length, max_query_len);
}
#endif

/**
  Implementation of Drop_table_error_handler::handle_condition().
  The reason for having this implementation is to silence technical low-level
  warnings during DROP TABLE operation. Currently we don't want to expose
  the following warnings during DROP TABLE:
    - Some of the table files are missing or invalid (the table is going to be
      deleted anyway, so why bother that something was missed);
    - A trigger associated with the table does not have DEFINER (One of the
      MySQL specifics now is that triggers are loaded for the table being
      dropped. So, we may have a warning that trigger does not have DEFINER
      attribute during DROP TABLE operation).

  @return TRUE if the condition is handled.
*/
bool Drop_table_error_handler::handle_condition(THD *thd,
                                                uint sql_errno,
                                                const char* sqlstate,
                                                Sql_condition::enum_warning_level level,
                                                const char* msg,
                                                Sql_condition ** cond_hdl)
{
  *cond_hdl= NULL;
  return ((sql_errno == EE_DELETE && my_errno == ENOENT) ||
          sql_errno == ER_TRG_NO_DEFINER);
}


#ifdef WITH_WSREP
THD::THD(bool is_applier)
#else
THD::THD()
#endif
  :Statement(&main_lex, &main_mem_root, STMT_CONVENTIONAL_EXECUTION,
             /* statement id */ 0),
   rli_fake(0), rgi_fake(0), rgi_slave(NULL),
   in_sub_stmt(0), log_all_errors(0),
   binlog_unsafe_warning_flags(0),
   binlog_table_maps(0),
   table_map_for_update(0),
   arg_of_last_insert_id_function(FALSE),
   first_successful_insert_id_in_prev_stmt(0),
   first_successful_insert_id_in_prev_stmt_for_binlog(0),
   first_successful_insert_id_in_cur_stmt(0),
   stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE),
   m_examined_row_count(0),
   accessed_rows_and_keys(0),
   m_statement_psi(NULL),
   m_idle_psi(NULL),
   m_server_idle(false),
   thread_id(0),
   global_disable_checkpoint(0),
   failed_com_change_user(0),
   is_fatal_error(0),
   transaction_rollback_request(0),
   is_fatal_sub_stmt_error(0),
   rand_used(0),
   time_zone_used(0),
   in_lock_tables(0),
   bootstrap(0),
   derived_tables_processing(FALSE),
   spcont(NULL),
#ifdef WITH_WSREP
   wsrep_applier(is_applier),
   wsrep_applier_closing(FALSE),
   wsrep_client_thread(0),
   wsrep_po_handle(WSREP_PO_INITIALIZER),
   wsrep_po_cnt(0),
   wsrep_po_in_trans(FALSE),
   wsrep_apply_format(0),
   wsrep_apply_toi(false),
#endif
   m_parser_state(NULL),
#if defined(ENABLED_DEBUG_SYNC)
   debug_sync_control(0),
#endif /* defined(ENABLED_DEBUG_SYNC) */
   wait_for_commit_ptr(0),
   main_da(0, false, false),
   m_stmt_da(&main_da)
{
  ulong tmp;

  mdl_context.init(this);
  /*
    We set THR_THD to temporarily point to this THD to register all the
    variables that allocate memory for this THD
  */
  THD *old_THR_THD= current_thd;
  set_current_thd(this);
  status_var.memory_used= 0;
  main_da.init();

  /*
    Pass nominal parameters to init_alloc_root only to ensure that
    the destructor works OK in case of an error. The main_mem_root
    will be re-initialized in init_for_queries().
  */
  init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0,
                 MYF(MY_THREAD_SPECIFIC));

  stmt_arena= this;
  thread_stack= 0;
  scheduler= thread_scheduler;                 // Will be fixed later
  event_scheduler.data= 0;
  event_scheduler.m_psi= 0;
  skip_wait_timeout= false;
  extra_port= 0;
  catalog= (char*)"std";                       // the only catalog we have for now
  main_security_ctx.init();
  security_ctx= &main_security_ctx;
  no_errors= 0;
  password= 0;
  query_start_used= query_start_sec_part_used= 0;
  count_cuted_fields= CHECK_FIELD_IGNORE;
  killed= NOT_KILLED;
  col_access=0;
  is_slave_error= thread_specific_used= FALSE;
  my_hash_clear(&handler_tables_hash);
  my_hash_clear(&ull_hash);
  tmp_table=0;
  cuted_fields= 0L;
  m_sent_row_count= 0L;
  limit_found_rows= 0;
  m_row_count_func= -1;
  statement_id_counter= 0UL;
  // Must be reset to handle error with THD's created for init of mysqld
  lex->current_select= 0;
  user_time.val= start_time= start_time_sec_part= 0;
  start_utime= prior_thr_create_utime= 0L;
  utime_after_lock= 0L;
  progress.arena= 0;
  progress.report_to_client= 0;
  progress.max_counter= 0;
  current_linfo =  0;
  slave_thread = 0;
  connection_name.str= 0;
  connection_name.length= 0;

  bzero(&variables, sizeof(variables));
  one_shot_set= 0;
  file_id = 0;
  query_id= 0;
  query_name_consts= 0;
  db_charset= global_system_variables.collation_database;
  bzero(ha_data, sizeof(ha_data));
  mysys_var=0;
  binlog_evt_union.do_union= FALSE;
  enable_slow_log= 0;
  durability_property= HA_REGULAR_DURABILITY;

#ifndef DBUG_OFF
  dbug_sentry=THD_SENTRY_MAGIC;
#endif
#ifndef EMBEDDED_LIBRARY
  mysql_audit_init_thd(this);
#endif
  net.vio=0;
  net.buff= 0;
  client_capabilities= 0;                      // minimalistic client
  system_thread= NON_SYSTEM_THREAD;
  cleanup_done= abort_on_warning= 0;
  peer_port= 0;                                // For SHOW PROCESSLIST
  transaction.m_pending_rows_event= 0;
  transaction.on= 1;
  wt_thd_lazy_init(&transaction.wt, &variables.wt_deadlock_search_depth_short,
                   &variables.wt_timeout_short,
                   &variables.wt_deadlock_search_depth_long,
                   &variables.wt_timeout_long);
#ifdef SIGNAL_WITH_VIO_CLOSE
  active_vio = 0;
#endif
  mysql_mutex_init(key_LOCK_thd_data, &LOCK_thd_data, MY_MUTEX_INIT_FAST);
  mysql_mutex_init(key_LOCK_wakeup_ready, &LOCK_wakeup_ready, MY_MUTEX_INIT_FAST);
  mysql_cond_init(key_COND_wakeup_ready, &COND_wakeup_ready, 0);
  /*
    LOCK_thread_count goes before LOCK_thd_data - the former is called around
    'delete thd', the latter - in THD::~THD
  */
  mysql_mutex_record_order(&LOCK_thread_count, &LOCK_thd_data);

  /* Variables with default values */
  proc_info="login";
  where= THD::DEFAULT_WHERE;
  variables.server_id = global_system_variables.server_id;
  slave_net = 0;
  m_command=COM_CONNECT;
  *scramble= '\0';

#ifdef WITH_WSREP
  mysql_mutex_init(key_LOCK_wsrep_thd, &LOCK_wsrep_thd, MY_MUTEX_INIT_FAST);
  mysql_cond_init(key_COND_wsrep_thd, &COND_wsrep_thd, NULL);
  wsrep_ws_handle.trx_id = WSREP_UNDEFINED_TRX_ID;
  wsrep_ws_handle.opaque = NULL;
  wsrep_retry_counter = 0;
  wsrep_PA_safe = true;
  wsrep_retry_query = NULL;
  wsrep_retry_query_len = 0;
  wsrep_retry_command = COM_CONNECT;
  wsrep_consistency_check = NO_CONSISTENCY_CHECK;
  wsrep_status_vars = 0;
  wsrep_mysql_replicated = 0;
  wsrep_TOI_pre_query = NULL;
  wsrep_TOI_pre_query_len = 0;
#endif
  /* Call to init() below requires fully initialized Open_tables_state. */
  reset_open_tables_state(this);

  init();
#if defined(ENABLED_PROFILING)
  profiling.set_thd(this);
#endif
  user_connect=(USER_CONN *)0;
  my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
               (my_hash_get_key) get_var_key,
               (my_hash_free_key) free_user_var, HASH_THREAD_SPECIFIC);

  sp_proc_cache= NULL;
  sp_func_cache= NULL;

  /* For user vars replication*/
  if (opt_bin_log)
    my_init_dynamic_array(&user_var_events,
                          sizeof(BINLOG_USER_VAR_EVENT *), 16, 16, MYF(0));
  else
    bzero((char*) &user_var_events, sizeof(user_var_events));

  /* Protocol */
  protocol= &protocol_text;                    // Default protocol
  protocol_text.init(this);
  protocol_binary.init(this);

  tablespace_op=FALSE;

  /*
    Initialize the random generator. We call my_rnd() without a lock as
    it's not really critical if two threads modify the structure at the
    same time.  We ensure that we have a unique number for each thread
    by adding the address of the stack.
  */
  tmp= (ulong) (my_rnd(&sql_rand) * 0xffffffff);
  my_rnd_init(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id);
  substitute_null_with_insert_id = FALSE;
  thr_lock_info_init(&lock_info); /* safety: will be reset after start */
#ifdef WITH_WSREP
  lock_info.mysql_thd= (void *)this;
  lock_info.in_lock_tables= false;
#ifdef WSREP_PROC_INFO
  wsrep_info[sizeof(wsrep_info) - 1] = '\0'; /* make sure it is 0-terminated */
#endif /* WSREP_PROC_INFO */
#endif /* WITH_WSREP */

  m_internal_handler= NULL;
  m_binlog_invoker= INVOKER_NONE;
  arena_for_cached_items= 0;
  memset(&invoker_user, 0, sizeof(invoker_user));
  memset(&invoker_host, 0, sizeof(invoker_host));
  prepare_derived_at_open= FALSE;
  create_tmp_table_for_derived= FALSE;
  save_prep_leaf_list= FALSE;
  /* Restore THR_THD */
  set_current_thd(old_THR_THD);
}


void THD::push_internal_handler(Internal_error_handler *handler)
{
  DBUG_ENTER("THD::push_internal_handler");
  if (m_internal_handler)
  {
    handler->m_prev_internal_handler= m_internal_handler;
    m_internal_handler= handler;
  }
  else
  {
    m_internal_handler= handler;
  }
  DBUG_VOID_RETURN;
}

bool THD::handle_condition(uint sql_errno,
                           const char* sqlstate,
                           Sql_condition::enum_warning_level level,
                           const char* msg,
                           Sql_condition ** cond_hdl)
{
  if (!m_internal_handler)
  {
    *cond_hdl= NULL;
    return FALSE;
  }

  for (Internal_error_handler *error_handler= m_internal_handler;
       error_handler;
       error_handler= error_handler->m_prev_internal_handler)
  {
    if (error_handler->handle_condition(this, sql_errno, sqlstate, level, msg,
                                        cond_hdl))
    {
      return TRUE;
    }
  }
  return FALSE;
}


Internal_error_handler *THD::pop_internal_handler()
{
  DBUG_ENTER("THD::pop_internal_handler");
  DBUG_ASSERT(m_internal_handler != NULL);
  Internal_error_handler *popped_handler= m_internal_handler;
  m_internal_handler= m_internal_handler->m_prev_internal_handler;
  DBUG_RETURN(popped_handler);
}


void THD::raise_error(uint sql_errno)
{
  const char* msg= ER(sql_errno);
  (void) raise_condition(sql_errno,
                         NULL,
                         Sql_condition::WARN_LEVEL_ERROR,
                         msg);
}

void THD::raise_error_printf(uint sql_errno, ...)
{
  va_list args;
  char ebuff[MYSQL_ERRMSG_SIZE];
  DBUG_ENTER("THD::raise_error_printf");
  DBUG_PRINT("my", ("nr: %d  errno: %d", sql_errno, errno));
  const char* format= ER(sql_errno);
  va_start(args, sql_errno);
  my_vsnprintf(ebuff, sizeof(ebuff), format, args);
  va_end(args);
  (void) raise_condition(sql_errno,
                         NULL,
                         Sql_condition::WARN_LEVEL_ERROR,
                         ebuff);
  DBUG_VOID_RETURN;
}

void THD::raise_warning(uint sql_errno)
{
  const char* msg= ER(sql_errno);
  (void) raise_condition(sql_errno,
                         NULL,
                         Sql_condition::WARN_LEVEL_WARN,
                         msg);
}

void THD::raise_warning_printf(uint sql_errno, ...)
{
  va_list args;
  char    ebuff[MYSQL_ERRMSG_SIZE];
  DBUG_ENTER("THD::raise_warning_printf");
  DBUG_PRINT("enter", ("warning: %u", sql_errno));
  const char* format= ER(sql_errno);
  va_start(args, sql_errno);
  my_vsnprintf(ebuff, sizeof(ebuff), format, args);
  va_end(args);
  (void) raise_condition(sql_errno,
                         NULL,
                         Sql_condition::WARN_LEVEL_WARN,
                         ebuff);
  DBUG_VOID_RETURN;
}

void THD::raise_note(uint sql_errno)
{
  DBUG_ENTER("THD::raise_note");
  DBUG_PRINT("enter", ("code: %d", sql_errno));
  if (!(variables.option_bits & OPTION_SQL_NOTES))
    DBUG_VOID_RETURN;
  const char* msg= ER(sql_errno);
  (void) raise_condition(sql_errno,
                         NULL,
                         Sql_condition::WARN_LEVEL_NOTE,
                         msg);
  DBUG_VOID_RETURN;
}

void THD::raise_note_printf(uint sql_errno, ...)
{
  va_list args;
  char    ebuff[MYSQL_ERRMSG_SIZE];
  DBUG_ENTER("THD::raise_note_printf");
  DBUG_PRINT("enter",("code: %u", sql_errno));
  if (!(variables.option_bits & OPTION_SQL_NOTES))
    DBUG_VOID_RETURN;
  const char* format= ER(sql_errno);
  va_start(args, sql_errno);
  my_vsnprintf(ebuff, sizeof(ebuff), format, args);
  va_end(args);
  (void) raise_condition(sql_errno,
                         NULL,
                         Sql_condition::WARN_LEVEL_NOTE,
                         ebuff);
  DBUG_VOID_RETURN;
}

Sql_condition* THD::raise_condition(uint sql_errno,
                                    const char* sqlstate,
                                    Sql_condition::enum_warning_level level,
                                    const char* msg)
{
  Diagnostics_area *da= get_stmt_da();
  Sql_condition *cond= NULL;
  DBUG_ENTER("THD::raise_condition");

  if (!(variables.option_bits & OPTION_SQL_NOTES) &&
      (level == Sql_condition::WARN_LEVEL_NOTE))
    DBUG_RETURN(NULL);

  da->opt_clear_warning_info(query_id);

  /*
    TODO: replace by DBUG_ASSERT(sql_errno != 0) once all bugs similar to
    Bug#36768 are fixed: a SQL condition must have a real (!=0) error number
    so that it can be caught by handlers.
  */
  if (sql_errno == 0)
    sql_errno= ER_UNKNOWN_ERROR;
  if (msg == NULL)
    msg= ER(sql_errno);
  if (sqlstate == NULL)
    sqlstate= mysql_errno_to_sqlstate(sql_errno);

  if ((level == Sql_condition::WARN_LEVEL_WARN) &&
      really_abort_on_warning())
  {
    /*
      FIXME:
      push_warning and strict SQL_MODE case.
    */
    level= Sql_condition::WARN_LEVEL_ERROR;
    killed= KILL_BAD_DATA;
  }

  switch (level)
  {
  case Sql_condition::WARN_LEVEL_NOTE:
  case Sql_condition::WARN_LEVEL_WARN:
    got_warning= 1;
    break;
  case Sql_condition::WARN_LEVEL_ERROR:
    mysql_audit_general(this, MYSQL_AUDIT_GENERAL_ERROR, sql_errno, msg);
    break;
  default:
    DBUG_ASSERT(FALSE);
  }

  if (handle_condition(sql_errno, sqlstate, level, msg, &cond))
    DBUG_RETURN(cond);

  if (level == Sql_condition::WARN_LEVEL_ERROR)
  {
    is_slave_error= 1; // needed to catch query errors during replication

    if (!da->is_error())
    {
      set_row_count_func(-1);
      da->set_error_status(sql_errno, msg, sqlstate, cond);
    }
  }

  query_cache_abort(&query_cache_tls);

  /*
    Avoid pushing a condition for fatal out of memory errors as this will
    require memory allocation and therefore might fail. Non fatal out of
    memory errors can occur if raised by SIGNAL/RESIGNAL statement.
  */
  if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY ||
                           sql_errno == ER_OUTOFMEMORY)))
  {
    cond= da->push_warning(this, sql_errno, sqlstate, level, msg);
  }
  DBUG_RETURN(cond);
}

extern "C"
void *thd_alloc(MYSQL_THD thd, unsigned int size)
{
  return thd->alloc(size);
}

extern "C"
void *thd_calloc(MYSQL_THD thd, unsigned int size)
{
  return thd->calloc(size);
}

extern "C"
char *thd_strdup(MYSQL_THD thd, const char *str)
{
  return thd->strdup(str);
}

extern "C"
char *thd_strmake(MYSQL_THD thd, const char *str, unsigned int size)
{
  return thd->strmake(str, size);
}

extern "C"
LEX_STRING *thd_make_lex_string(THD *thd, LEX_STRING *lex_str,
                                const char *str, unsigned int size,
                                int allocate_lex_string)
{
  return allocate_lex_string ? thd->make_lex_string(str, size)
                             : thd->make_lex_string(lex_str, str, size);
}

extern "C"
void *thd_memdup(MYSQL_THD thd, const void* str, unsigned int size)
{
  return thd->memdup(str, size);
}

extern "C"
void thd_get_xid(const MYSQL_THD thd, MYSQL_XID *xid)
{
  *xid = *(MYSQL_XID *) &thd->transaction.xid_state.xid;
}


extern "C"
my_time_t thd_TIME_to_gmt_sec(MYSQL_THD thd, const MYSQL_TIME *ltime,
                              unsigned int *errcode)
{
  Time_zone *tz= thd ? thd->variables.time_zone :
                       global_system_variables.time_zone;
  return tz->TIME_to_gmt_sec(ltime, errcode);
}


extern "C"
void thd_gmt_sec_to_TIME(MYSQL_THD thd, MYSQL_TIME *ltime, my_time_t t)
{
  Time_zone *tz= thd ? thd->variables.time_zone :
                       global_system_variables.time_zone;
  tz->gmt_sec_to_TIME(ltime, t);
}


#ifdef _WIN32
extern "C"   THD *_current_thd_noinline(void)
{
  return my_pthread_getspecific_ptr(THD*,THR_THD);
}
#endif
/*
  Init common variables that have to be reset on start and on change_user
*/

void THD::init(void)
{
  DBUG_ENTER("thd::init");
  mysql_mutex_lock(&LOCK_global_system_variables);
  plugin_thdvar_init(this);
  /*
    variables= global_system_variables above has reset
    variables.pseudo_thread_id to 0. We need to correct it here to
    avoid temporary tables replication failure.
  */
  variables.pseudo_thread_id= thread_id;

  variables.default_master_connection.str= default_master_connection_buff;
  ::strmake(variables.default_master_connection.str,
            global_system_variables.default_master_connection.str,
            variables.default_master_connection.length);

  mysql_mutex_unlock(&LOCK_global_system_variables);

  server_status= SERVER_STATUS_AUTOCOMMIT;
  if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
    server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES;

  transaction.all.modified_non_trans_table=
    transaction.stmt.modified_non_trans_table= FALSE;
  open_options=ha_open_options;
  update_lock_default= (variables.low_priority_updates ?
                        TL_WRITE_LOW_PRIORITY :
                        TL_WRITE);
  tx_isolation= (enum_tx_isolation) variables.tx_isolation;
  tx_read_only= variables.tx_read_only;
  update_charset();
  reset_current_stmt_binlog_format_row();
  reset_binlog_local_stmt_filter();
  set_status_var_init();
  bzero((char *) &org_status_var, sizeof(org_status_var));
  start_bytes_received= 0;
  last_commit_gtid.seq_no= 0;
#ifdef WITH_WSREP
  wsrep_exec_mode= wsrep_applier ? REPL_RECV :  LOCAL_STATE;
  wsrep_conflict_state= NO_CONFLICT;
  wsrep_query_state= QUERY_IDLE;
  wsrep_last_query_id= 0;
  wsrep_trx_meta.gtid= WSREP_GTID_UNDEFINED;
  wsrep_trx_meta.depends_on= WSREP_SEQNO_UNDEFINED;
  wsrep_converted_lock_session= false;
  wsrep_retry_counter= 0;
  wsrep_rli= NULL;
  wsrep_rgi= NULL;
  wsrep_PA_safe= true;
  wsrep_consistency_check = NO_CONSISTENCY_CHECK;
  wsrep_mysql_replicated  = 0;

  wsrep_TOI_pre_query = NULL;
  wsrep_TOI_pre_query_len = 0;
#endif
  if (variables.sql_log_bin)
    variables.option_bits|= OPTION_BIN_LOG;
  else
    variables.option_bits&= ~OPTION_BIN_LOG;

  select_commands= update_commands= other_commands= 0;
  /* Set to handle counting of aborted connections */
  userstat_running= opt_userstat_running;
  last_global_update_time= current_connect_time= time(NULL);
#if defined(ENABLED_DEBUG_SYNC)
  /* Initialize the Debug Sync Facility. See debug_sync.cc. */
  debug_sync_init_thread(this);
#endif /* defined(ENABLED_DEBUG_SYNC) */
  apc_target.init(&LOCK_thd_data);
  DBUG_VOID_RETURN;
}


/* Updates some status variables to be used by update_global_user_stats */

void THD::update_stats(void)
{
  /* sql_command == SQLCOM_END in case of parse errors or quit */
  if (lex->sql_command != SQLCOM_END)
  {
    /* A SQL query. */
    if (lex->sql_command == SQLCOM_SELECT)
      select_commands++;
    else if (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND)
    {
      /* Ignore 'SHOW ' commands */
    }
    else if (is_update_query(lex->sql_command))
      update_commands++;
    else
      other_commands++;
  }
}


void THD::update_all_stats()
{
  ulonglong end_cpu_time, end_utime;
  double busy_time, cpu_time;

  /* This is set at start of query if opt_userstat_running was set */
  if (!userstat_running)
    return;

  end_cpu_time= my_getcputime();
  end_utime=    microsecond_interval_timer();
  busy_time= (end_utime - start_utime) / 1000000.0;
  cpu_time=  (end_cpu_time - start_cpu_time) / 10000000.0;
  /* In case there are bad values, 2629743 is the #seconds in a month. */
  if (cpu_time > 2629743.0)
    cpu_time= 0;
  status_var_add(status_var.cpu_time, cpu_time);
  status_var_add(status_var.busy_time, busy_time);

  update_global_user_stats(this, TRUE, my_time(0));
  // Has to be updated after update_global_user_stats()
  userstat_running= 0;
}


/*
  Init THD for query processing.
  This has to be called once before we call mysql_parse.
  See also comments in sql_class.h.
*/

void THD::init_for_queries()
{
  set_time();
  ha_enable_transaction(this,TRUE);

  reset_root_defaults(mem_root, variables.query_alloc_block_size,
                      variables.query_prealloc_size);
  reset_root_defaults(&transaction.mem_root,
                      variables.trans_alloc_block_size,
                      variables.trans_prealloc_size);
  transaction.xid_state.xid.null();
  transaction.xid_state.in_thd=1;
}


/*
  Do what's needed when one invokes change user

  SYNOPSIS
    change_user()

  IMPLEMENTATION
    Reset all resources that are connection specific
*/


void THD::change_user(void)
{
  add_status_to_global();

  cleanup();
  reset_killed();
  cleanup_done= 0;
  init();
  stmt_map.reset();
  my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
               (my_hash_get_key) get_var_key,
               (my_hash_free_key) free_user_var, 0);
  sp_cache_clear(&sp_proc_cache);
  sp_cache_clear(&sp_func_cache);
}


/* Do operations that may take a long time */

void THD::cleanup(void)
{
  DBUG_ENTER("THD::cleanup");
  DBUG_ASSERT(cleanup_done == 0);

  killed= KILL_CONNECTION;
#ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE
  if (transaction.xid_state.xa_state == XA_PREPARED)
  {
#error xid_state in the cache should be replaced by the allocated value
  }
#endif

  mysql_ha_cleanup(this);
  locked_tables_list.unlock_locked_tables(this);

  close_temporary_tables(this);

  transaction.xid_state.xa_state= XA_NOTR;
  trans_rollback(this);
  xid_cache_delete(&transaction.xid_state);

  DBUG_ASSERT(open_tables == NULL);
  /*
    If the thread was in the middle of an ongoing transaction (rolled
    back a few lines above) or under LOCK TABLES (unlocked the tables
    and left the mode a few lines above), there will be outstanding
    metadata locks. Release them.
  */
  mdl_context.release_transactional_locks();

  /* Release the global read lock, if acquired. */
  if (global_read_lock.is_acquired())
    global_read_lock.unlock_global_read_lock(this);

  if (user_connect)
  {
    decrease_user_connections(user_connect);
    user_connect= 0;                            // Safety
  }
  wt_thd_destroy(&transaction.wt);

#if defined(ENABLED_DEBUG_SYNC)
  /* End the Debug Sync Facility. See debug_sync.cc. */
  debug_sync_end_thread(this);
#endif /* defined(ENABLED_DEBUG_SYNC) */

  delete_dynamic(&user_var_events);
  my_hash_free(&user_vars);
  sp_cache_clear(&sp_proc_cache);
  sp_cache_clear(&sp_func_cache);

  mysql_ull_cleanup(this);
  /* All metadata locks must have been released by now. */
  DBUG_ASSERT(!mdl_context.has_locks());

  apc_target.destroy();
  cleanup_done=1;
  DBUG_VOID_RETURN;
}


THD::~THD()
{
  THD *orig_thd= current_thd;
  THD_CHECK_SENTRY(this);
  DBUG_ENTER("~THD()");

  /*
    In error cases, thd may not be current thd. We have to fix this so
    that memory allocation counting is done correctly
  */
  set_current_thd(this);

  /* Ensure that no one is using THD */
  mysql_mutex_lock(&LOCK_thd_data);
  mysql_mutex_unlock(&LOCK_thd_data);

#ifdef WITH_WSREP
  mysql_mutex_lock(&LOCK_wsrep_thd);
  mysql_mutex_unlock(&LOCK_wsrep_thd);
  mysql_mutex_destroy(&LOCK_wsrep_thd);
  if (wsrep_rli) delete wsrep_rli;
  if (wsrep_rgi) delete wsrep_rgi;
  if (wsrep_status_vars) wsrep->stats_free(wsrep, wsrep_status_vars);
#endif
  /* Close connection */
#ifndef EMBEDDED_LIBRARY
  if (net.vio)
    vio_delete(net.vio);
  net_end(&net);
#endif
  stmt_map.reset();                     /* close all prepared statements */
  if (!cleanup_done)
    cleanup();

  mdl_context.destroy();
  ha_close_connection(this);
  mysql_audit_release(this);
  plugin_thdvar_cleanup(this);

  main_security_ctx.destroy();
  my_free(db);
  db= NULL;
  free_root(&transaction.mem_root,MYF(0));
  mysql_cond_destroy(&COND_wakeup_ready);
  mysql_mutex_destroy(&LOCK_wakeup_ready);
  mysql_mutex_destroy(&LOCK_thd_data);
#ifndef DBUG_OFF
  dbug_sentry= THD_SENTRY_GONE;
#endif
#ifndef EMBEDDED_LIBRARY
  if (rgi_fake)
  {
    delete rgi_fake;
    rgi_fake= NULL;
  }
  if (rli_fake)
  {
    delete rli_fake;
    rli_fake= NULL;
  }

  mysql_audit_free_thd(this);
  if (rgi_slave)
    rgi_slave->cleanup_after_session();
#endif

  free_root(&main_mem_root, MYF(0));
  main_da.free_memory();
  if (status_var.memory_used != 0)
  {
    DBUG_PRINT("error", ("memory_used: %lld", status_var.memory_used));
    SAFEMALLOC_REPORT_MEMORY(my_thread_dbug_id());
    DBUG_ASSERT(status_var.memory_used == 0);  // Ensure everything is freed
  }

  set_current_thd(orig_thd);
  DBUG_VOID_RETURN;
}


/*
  Add all status variables to another status variable array

  SYNOPSIS
    add_to_status()
    to_var       add to this array
    from_var     from this array

  NOTES
    This function assumes that all variables at start are long/ulong and
    other types are handled explicitly
*/

void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
{
  ulong *end= (ulong*) ((uchar*) to_var +
                        offsetof(STATUS_VAR, last_system_status_var) +
                        sizeof(ulong));
  ulong *to= (ulong*) to_var, *from= (ulong*) from_var;

  while (to != end)
    *(to++)+= *(from++);

  /* Handle the not ulong variables. See end of system_status_var */
  to_var->bytes_received+= from_var->bytes_received;
  to_var->bytes_sent+= from_var->bytes_sent;
  to_var->rows_read+= from_var->rows_read;
  to_var->rows_sent+= from_var->rows_sent;
  to_var->rows_tmp_read+= from_var->rows_tmp_read;
  to_var->binlog_bytes_written+= from_var->binlog_bytes_written;
  to_var->cpu_time+= from_var->cpu_time;
  to_var->busy_time+= from_var->busy_time;
}
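
/*
  Worked example (illustrative only): because every member of STATUS_VAR up
  to and including last_system_status_var is a long/ulong counter, the
  function above can treat both structures as flat ulong arrays and add them
  element-wise, e.g.

    add_to_status(&global_status_var, &thd->status_var);

  adds every per-thread counter into the global array in one pass; only the
  trailing non-ulong members (bytes_received/sent, rows_*, cpu_time,
  busy_time) need the explicit additions after the loop.
*/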
|
|
|
|
/*
|
|
Add the difference between two status variable arrays to another one.
|
|
|
|
SYNOPSIS
|
|
add_diff_to_status
|
|
to_var add to this array
|
|
from_var from this array
|
|
dec_var minus this array
|
|
|
|
NOTE
|
|
This function assumes that all variables at start are long/ulong and
|
|
other types are handled explicitely
|
|
*/
|
|
|
|
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
|
|
STATUS_VAR *dec_var)
|
|
{
|
|
ulong *end= (ulong*) ((uchar*) to_var + offsetof(STATUS_VAR,
|
|
last_system_status_var) +
|
|
sizeof(ulong));
|
|
ulong *to= (ulong*) to_var, *from= (ulong*) from_var, *dec= (ulong*) dec_var;
|
|
|
|
while (to != end)
|
|
*(to++)+= *(from++) - *(dec++);
|
|
|
|
to_var->bytes_received+= from_var->bytes_received -
|
|
dec_var->bytes_received;
|
|
to_var->bytes_sent+= from_var->bytes_sent - dec_var->bytes_sent;
|
|
to_var->rows_read+= from_var->rows_read - dec_var->rows_read;
|
|
to_var->rows_sent+= from_var->rows_sent - dec_var->rows_sent;
|
|
to_var->rows_tmp_read+= from_var->rows_tmp_read - dec_var->rows_tmp_read;
|
|
to_var->binlog_bytes_written+= from_var->binlog_bytes_written -
|
|
dec_var->binlog_bytes_written;
|
|
to_var->cpu_time+= from_var->cpu_time - dec_var->cpu_time;
|
|
to_var->busy_time+= from_var->busy_time - dec_var->busy_time;
|
|
}
|
|
|
|
#define SECONDS_TO_WAIT_FOR_KILL 2
|
|
#if !defined(__WIN__) && defined(HAVE_SELECT)
|
|
/* my_sleep() can wait for sub second times */
|
|
#define WAIT_FOR_KILL_TRY_TIMES 20
|
|
#else
|
|
#define WAIT_FOR_KILL_TRY_TIMES 2
|
|
#endif
|
|
|
|
|
|
/**
|
|
Awake a thread.
|
|
|
|
@param[in] state_to_set value for THD::killed
|
|
|
|
This is normally called from another thread's THD object.
|
|
|
|
@note Do always call this while holding LOCK_thd_data.
|
|
*/
|
|
|
|
void THD::awake(killed_state state_to_set)
|
|
{
|
|
DBUG_ENTER("THD::awake");
|
|
DBUG_PRINT("enter", ("this: %p current_thd: %p", this, current_thd));
|
|
THD_CHECK_SENTRY(this);
|
|
mysql_mutex_assert_owner(&LOCK_thd_data);
|
|
|
|
print_aborted_warning(3, "KILLED");
|
|
|
|
/* Set the 'killed' flag of 'this', which is the target THD object. */
|
|
killed= state_to_set;
|
|
|
|
if (state_to_set >= KILL_CONNECTION || state_to_set == NOT_KILLED)
|
|
{
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
|
|
if (this != current_thd)
|
|
{
|
|
if(active_vio)
|
|
vio_shutdown(active_vio, SHUT_RDWR);
|
|
}
|
|
#endif
|
|
|
|
/* Mark the target thread's alarm request expired, and signal alarm. */
|
|
thr_alarm_kill(thread_id);
|
|
|
|
/* Send an event to the scheduler that a thread should be killed. */
|
|
if (!slave_thread)
|
|
MYSQL_CALLBACK(scheduler, post_kill_notification, (this));
|
|
}
|
|
|
|
/* Interrupt target waiting inside a storage engine. */
|
|
if (state_to_set != NOT_KILLED)
|
|
#ifdef WITH_WSREP
|
|
/* TODO: prevent applier close here */
|
|
#endif /* WITH_WSREP */
|
|
ha_kill_query(this, thd_kill_level(this));
|
|
|
|
/* Broadcast a condition to kick the target if it is waiting on it. */
|
|
if (mysys_var)
|
|
{
|
|
mysql_mutex_lock(&mysys_var->mutex);
|
|
if (!system_thread) // Don't abort locks
|
|
mysys_var->abort=1;
|
|
/*
|
|
This broadcast could be up in the air if the victim thread
|
|
exits the cond in the time between read and broadcast, but that is
|
|
ok since all we want to do is to make the victim thread get out
|
|
of waiting on current_cond.
|
|
If we see a non-zero current_cond: it cannot be an old value (because
|
|
then exit_cond() should have run and it can't because we have mutex); so
|
|
it is the true value but maybe current_mutex is not yet non-zero (we're
|
|
in the middle of enter_cond() and there is a "memory order
|
|
inversion"). So we test the mutex too to not lock 0.
|
|
|
|
Note that there is a small chance we fail to kill. If victim has locked
|
|
current_mutex, but hasn't yet entered enter_cond() (which means that
|
|
current_cond and current_mutex are 0), then the victim will not get
|
|
a signal and it may wait "forever" on the cond (until
|
|
we issue a second KILL or the status it's waiting for happens).
|
|
It's true that we have set its thd->killed but it may not
|
|
see it immediately and so may have time to reach the cond_wait().
|
|
|
|
However, where possible, we test for killed once again after
|
|
enter_cond(). This should make the signaling as safe as possible.
|
|
However, there is still a small chance of failure on platforms with
|
|
instruction or memory write reordering.
|
|
|
|
We have to do the loop with trylock, because if we would use
|
|
pthread_mutex_lock(), we can cause a deadlock as we are here locking
|
|
the mysys_var->mutex and mysys_var->current_mutex in a different order
|
|
than in the thread we are trying to kill.
|
|
We only sleep for 2 seconds as we don't want to have LOCK_thd_data
|
|
locked too long time.
|
|
|
|
      There is a small chance we may not succeed in aborting a thread that
|
|
is not yet waiting for a mutex, but as this happens only for a
|
|
thread that was doing something else when the kill was issued and
|
|
which should detect the kill flag before it starts to wait, this
|
|
should be good enough.
|
|
*/
|
|
if (mysys_var->current_cond && mysys_var->current_mutex)
|
|
{
|
|
uint i;
|
|
for (i= 0; i < WAIT_FOR_KILL_TRY_TIMES * SECONDS_TO_WAIT_FOR_KILL; i++)
|
|
{
|
|
int ret= mysql_mutex_trylock(mysys_var->current_mutex);
|
|
mysql_cond_broadcast(mysys_var->current_cond);
|
|
if (!ret)
|
|
{
|
|
/* Signal is sure to get through */
|
|
mysql_mutex_unlock(mysys_var->current_mutex);
|
|
break;
|
|
}
|
|
my_sleep(1000000L / WAIT_FOR_KILL_TRY_TIMES);
|
|
}
|
|
}
|
|
mysql_mutex_unlock(&mysys_var->mutex);
|
|
}
|
|
DBUG_VOID_RETURN;
|
|
}
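
/*
  Illustrative call site (added; not from this file): a KILL command
  handler typically locks the victim's LOCK_thd_data before calling
  awake(), as required by the @note above:

    mysql_mutex_lock(&victim->LOCK_thd_data);
    victim->awake(KILL_QUERY);               // or KILL_CONNECTION, ...
    mysql_mutex_unlock(&victim->LOCK_thd_data);
*/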
|
|
|
|
|
|
/**
|
|
Close the Vio associated this session.
|
|
|
|
@remark LOCK_thd_data is taken due to the fact that
|
|
the Vio might be disassociated concurrently.
|
|
*/
|
|
|
|
void THD::disconnect()
|
|
{
|
|
Vio *vio= NULL;
|
|
|
|
mysql_mutex_lock(&LOCK_thd_data);
|
|
|
|
killed= KILL_CONNECTION;
|
|
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
|
|
  /*
    Since an active vio might not have been set yet, save a
    reference in any case, to avoid closing a nonexistent vio
    or closing it twice if there is an active one.
  */
|
|
vio= active_vio;
|
|
close_active_vio();
|
|
#endif
|
|
|
|
  /* Disconnect even if an active vio is not associated. */
|
|
if (net.vio != vio)
|
|
vio_close(net.vio);
|
|
|
|
mysql_mutex_unlock(&LOCK_thd_data);
|
|
}
|
|
|
|
|
|
bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use,
|
|
bool needs_thr_lock_abort)
|
|
{
|
|
THD *in_use= ctx_in_use->get_thd();
|
|
bool signalled= FALSE;
|
|
|
|
if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
|
|
!in_use->killed)
|
|
{
|
|
in_use->killed= KILL_CONNECTION;
|
|
mysql_mutex_lock(&in_use->mysys_var->mutex);
|
|
if (in_use->mysys_var->current_cond)
|
|
mysql_cond_broadcast(in_use->mysys_var->current_cond);
|
|
mysql_mutex_unlock(&in_use->mysys_var->mutex);
|
|
signalled= TRUE;
|
|
}
|
|
|
|
if (needs_thr_lock_abort)
|
|
{
|
|
mysql_mutex_lock(&in_use->LOCK_thd_data);
|
|
for (TABLE *thd_table= in_use->open_tables;
|
|
thd_table ;
|
|
thd_table= thd_table->next)
|
|
{
|
|
/*
|
|
Check for TABLE::needs_reopen() is needed since in some places we call
|
|
handler::close() for table instance (and set TABLE::db_stat to 0)
|
|
and do not remove such instances from the THD::open_tables
|
|
for some time, during which other thread can see those instances
|
|
(e.g. see partitioning code).
|
|
*/
|
|
if (!thd_table->needs_reopen())
|
|
#ifdef WITH_WSREP
|
|
{
|
|
signalled|= mysql_lock_abort_for_thread(this, thd_table);
|
|
if (this && WSREP(this) && wsrep_thd_is_BF((void *)this, FALSE))
|
|
{
|
|
WSREP_DEBUG("remove_table_from_cache: %llu",
|
|
(unsigned long long) this->real_id);
|
|
wsrep_abort_thd((void *)this, (void *)in_use, FALSE);
|
|
}
|
|
}
|
|
#else
|
|
signalled|= mysql_lock_abort_for_thread(this, thd_table);
|
|
#endif
|
|
}
|
|
mysql_mutex_unlock(&in_use->LOCK_thd_data);
|
|
}
|
|
return signalled;
|
|
}
|
|
|
|
|
|
/*
  Get error number for killed state

  Note that the error message can't have any parameters.
  See thd::kill_message()
*/

int killed_errno(killed_state killed)
{
  DBUG_ENTER("killed_errno");
  DBUG_PRINT("enter", ("killed: %d", killed));

  switch (killed) {
  case NOT_KILLED:
  case KILL_HARD_BIT:
    DBUG_RETURN(0);                             // Probably wrong usage
  case KILL_BAD_DATA:
  case KILL_BAD_DATA_HARD:
  case ABORT_QUERY_HARD:
  case ABORT_QUERY:
    DBUG_RETURN(0);                             // Not a real error
  case KILL_CONNECTION:
  case KILL_CONNECTION_HARD:
  case KILL_SYSTEM_THREAD:
  case KILL_SYSTEM_THREAD_HARD:
    DBUG_RETURN(ER_CONNECTION_KILLED);
  case KILL_QUERY:
  case KILL_QUERY_HARD:
    DBUG_RETURN(ER_QUERY_INTERRUPTED);
  case KILL_SERVER:
  case KILL_SERVER_HARD:
    DBUG_RETURN(ER_SERVER_SHUTDOWN);
  }
  DBUG_RETURN(0);                               // Keep compiler happy
}
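
/*
  Illustrative example (added; not from the original source): a typical
  caller maps the kill state to a client error along the lines of

    if (int err= killed_errno(thd->killed))
      my_message(err, ER(err), MYF(0));

  States that map to 0 (NOT_KILLED, KILL_BAD_DATA, ABORT_QUERY, ...)
  produce no client error here.
*/
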
/*
|
|
Remember the location of thread info, the structure needed for
|
|
sql_alloc() and the structure for the net buffer
|
|
*/
|
|
|
|
bool THD::store_globals()
|
|
{
|
|
/*
|
|
Assert that thread_stack is initialized: it's necessary to be able
|
|
to track stack overrun.
|
|
*/
|
|
DBUG_ASSERT(thread_stack);
|
|
|
|
if (set_current_thd(this) ||
|
|
my_pthread_setspecific_ptr(THR_MALLOC, &mem_root))
|
|
return 1;
|
|
  /*
    mysys_var is concurrently readable by a killer thread.
    It is protected by LOCK_thd_data; there is no need to lock while the
    pointer is changing from NULL to non-NULL. If the killer thread reads
    NULL it doesn't refer to anything, but if it is non-NULL we need to
    ensure that the thread doesn't proceed to assign another thread to
    have the mysys_var reference (which in fact refers to the worker
    thread's local storage with key THR_KEY_mysys).
  */
|
|
mysys_var=my_thread_var;
|
|
/*
|
|
Let mysqld define the thread id (not mysys)
|
|
This allows us to move THD to different threads if needed.
|
|
*/
|
|
mysys_var->id= thread_id;
|
|
real_id= pthread_self(); // For debugging
|
|
mysys_var->stack_ends_here= thread_stack + // for consistency, see libevent_thread_proc
|
|
STACK_DIRECTION * (long)my_thread_stack_size;
|
|
vio_set_thread_id(net.vio, real_id);
|
|
/*
|
|
We have to call thr_lock_info_init() again here as THD may have been
|
|
created in another thread
|
|
*/
|
|
thr_lock_info_init(&lock_info);
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
Untie THD from current thread
|
|
|
|
Used when using --thread-handling=pool-of-threads
|
|
*/
|
|
|
|
void THD::reset_globals()
|
|
{
|
|
mysql_mutex_lock(&LOCK_thd_data);
|
|
mysys_var= 0;
|
|
mysql_mutex_unlock(&LOCK_thd_data);
|
|
|
|
/* Undocking the thread specific data. */
|
|
set_current_thd(0);
|
|
my_pthread_setspecific_ptr(THR_MALLOC, NULL);
|
|
|
|
}
|
|
|
|
/*
|
|
Cleanup after query.
|
|
|
|
SYNOPSIS
|
|
THD::cleanup_after_query()
|
|
|
|
DESCRIPTION
|
|
This function is used to reset thread data to its default state.
|
|
|
|
NOTE
|
|
This function is not suitable for setting thread data to some
|
|
non-default values, as there is only one replication thread, so
|
|
different master threads may overwrite data of each other on
|
|
slave.
|
|
*/
|
|
|
|
void THD::cleanup_after_query()
|
|
{
|
|
DBUG_ENTER("THD::cleanup_after_query");
|
|
|
|
thd_progress_end(this);
|
|
|
|
/*
|
|
Reset rand_used so that detection of calls to rand() will save random
|
|
seeds if needed by the slave.
|
|
|
|
Do not reset rand_used if inside a stored function or trigger because
|
|
only the call to these operations is logged. Thus only the calling
|
|
statement needs to detect rand() calls made by its substatements. These
|
|
substatements must not set rand_used to 0 because it would remove the
|
|
detection of rand() by the calling statement.
|
|
*/
|
|
if (!in_sub_stmt) /* stored functions and triggers are a special case */
|
|
{
|
|
/* Forget those values, for next binlogger: */
|
|
stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0;
|
|
auto_inc_intervals_in_cur_stmt_for_binlog.empty();
|
|
rand_used= 0;
|
|
#ifndef EMBEDDED_LIBRARY
|
|
/*
|
|
Clean possible unused INSERT_ID events by current statement.
|
|
is_update_query() is needed to ignore SET statements:
|
|
        Statements that don't update anything directly and don't
        use stored functions. This is mostly necessary to ignore
|
|
statements in binlog between SET INSERT_ID and DML statement
|
|
which is intended to consume its event (there can be other
|
|
SET statements between them).
|
|
*/
|
|
if ((rgi_slave || rli_fake) && is_update_query(lex->sql_command))
|
|
auto_inc_intervals_forced.empty();
|
|
#endif
|
|
}
|
|
/*
|
|
Forget the binlog stmt filter for the next query.
|
|
There are some code paths that:
|
|
- do not call THD::decide_logging_format()
|
|
- do call THD::binlog_query(),
|
|
making this reset necessary.
|
|
*/
|
|
reset_binlog_local_stmt_filter();
|
|
if (first_successful_insert_id_in_cur_stmt > 0)
|
|
{
|
|
/* set what LAST_INSERT_ID() will return */
|
|
first_successful_insert_id_in_prev_stmt=
|
|
first_successful_insert_id_in_cur_stmt;
|
|
first_successful_insert_id_in_cur_stmt= 0;
|
|
substitute_null_with_insert_id= TRUE;
|
|
}
|
|
arg_of_last_insert_id_function= 0;
|
|
/* Free Items that were created during this execution */
|
|
free_items();
|
|
/* Reset where. */
|
|
where= THD::DEFAULT_WHERE;
|
|
/* reset table map for multi-table update */
|
|
table_map_for_update= 0;
|
|
m_binlog_invoker= INVOKER_NONE;
|
|
#ifdef WITH_WSREP
|
|
if (TOTAL_ORDER == wsrep_exec_mode)
|
|
{
|
|
wsrep_exec_mode = LOCAL_STATE;
|
|
}
|
|
//wsrep_trx_seqno = 0;
|
|
#endif /* WITH_WSREP */
|
|
|
|
#ifndef EMBEDDED_LIBRARY
|
|
if (rgi_slave)
|
|
rgi_slave->cleanup_after_query();
|
|
#endif
|
|
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/*
|
|
Convert a string to another character set
|
|
|
|
SYNOPSIS
|
|
convert_string()
|
|
to Store new allocated string here
|
|
to_cs New character set for allocated string
|
|
from String to convert
|
|
from_length Length of string to convert
|
|
from_cs Original character set
|
|
|
|
NOTES
|
|
to will be 0-terminated to make it easy to pass to system funcs
|
|
|
|
RETURN
|
|
0 ok
|
|
1 End of memory.
|
|
In this case to->str will point to 0 and to->length will be 0.
|
|
*/
|
|
|
|
bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
                         const char *from, uint from_length,
                         CHARSET_INFO *from_cs)
{
  DBUG_ENTER("convert_string");
  size_t new_length= to_cs->mbmaxlen * from_length;
  uint dummy_errors;
  if (!(to->str= (char*) alloc(new_length+1)))
  {
    to->length= 0;                              // Safety fix
    DBUG_RETURN(1);                             // EOM
  }
  to->length= copy_and_convert((char*) to->str, new_length, to_cs,
                               from, from_length, from_cs, &dummy_errors);
  to->str[to->length]= 0;                       // Safety
  DBUG_RETURN(0);
}
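
/*
  Minimal usage sketch (added; hypothetical call site): converting a name
  received in the client character set into the system character set:

    LEX_STRING out;
    if (thd->convert_string(&out, system_charset_info,
                            name, (uint) strlen(name), thd->charset()))
      return TRUE;             // out of memory, error already reported

  On success out.str is 0-terminated and allocated on the THD memroot.
*/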
|
|
|
|
|
|
/*
|
|
Convert string from source character set to target character set inplace.
|
|
|
|
SYNOPSIS
|
|
THD::convert_string
|
|
|
|
DESCRIPTION
|
|
Convert string using convert_buffer - buffer for character set
|
|
conversion shared between all protocols.
|
|
|
|
RETURN
|
|
0 ok
|
|
!0 out of memory
|
|
*/
|
|
|
|
bool THD::convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs)
|
|
{
|
|
uint dummy_errors;
|
|
if (convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs, &dummy_errors))
|
|
return TRUE;
|
|
  /*
    If convert_buffer is at most half full, or s has no buffer of its own,
    copying into s is more efficient in the long term than swapping buffers.
  */
|
|
if (convert_buffer.alloced_length() >= convert_buffer.length() * 2 ||
|
|
!s->is_alloced())
|
|
{
|
|
return s->copy(convert_buffer);
|
|
}
|
|
s->swap(convert_buffer);
|
|
return FALSE;
|
|
}
|
|
|
|
|
|
/*
|
|
Update some cache variables when character set changes
|
|
*/
|
|
|
|
void THD::update_charset()
|
|
{
|
|
uint32 not_used;
|
|
charset_is_system_charset=
|
|
!String::needs_conversion(0,
|
|
variables.character_set_client,
|
|
system_charset_info,
|
|
¬_used);
|
|
charset_is_collation_connection=
|
|
!String::needs_conversion(0,
|
|
variables.character_set_client,
|
|
variables.collation_connection,
|
|
¬_used);
|
|
charset_is_character_set_filesystem=
|
|
!String::needs_conversion(0,
|
|
variables.character_set_client,
|
|
variables.character_set_filesystem,
|
|
¬_used);
|
|
}
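
/*
  Descriptive note (added): each flag set above caches whether strings
  arriving in character_set_client can be used as-is (i.e. need no
  conversion) when interpreted as system_charset_info,
  collation_connection or character_set_filesystem respectively.
*/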
|
|
|
|
|
|
/* Routines for adding tables to the list of tables changed in a transaction */
|
|
|
|
inline static void list_include(CHANGED_TABLE_LIST** prev,
|
|
CHANGED_TABLE_LIST* curr,
|
|
CHANGED_TABLE_LIST* new_table)
|
|
{
|
|
if (new_table)
|
|
{
|
|
*prev = new_table;
|
|
(*prev)->next = curr;
|
|
}
|
|
}
|
|
|
|
/* Add a table to the list of tables changed in a transaction */
|
|
|
|
void THD::add_changed_table(TABLE *table)
|
|
{
|
|
DBUG_ENTER("THD::add_changed_table(table)");
|
|
|
|
DBUG_ASSERT(in_multi_stmt_transaction_mode() && table->file->has_transactions());
|
|
add_changed_table(table->s->table_cache_key.str,
|
|
(long) table->s->table_cache_key.length);
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void THD::add_changed_table(const char *key, long key_length)
|
|
{
|
|
DBUG_ENTER("THD::add_changed_table(key)");
|
|
CHANGED_TABLE_LIST **prev_changed = &transaction.changed_tables;
|
|
CHANGED_TABLE_LIST *curr = transaction.changed_tables;
|
|
|
|
for (; curr; prev_changed = &(curr->next), curr = curr->next)
|
|
{
|
|
int cmp = (long)curr->key_length - (long)key_length;
|
|
if (cmp < 0)
|
|
{
|
|
list_include(prev_changed, curr, changed_table_dup(key, key_length));
|
|
DBUG_PRINT("info",
|
|
("key_length: %ld %u", key_length,
|
|
(*prev_changed)->key_length));
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
else if (cmp == 0)
|
|
{
|
|
cmp = memcmp(curr->key, key, curr->key_length);
|
|
if (cmp < 0)
|
|
{
|
|
list_include(prev_changed, curr, changed_table_dup(key, key_length));
|
|
DBUG_PRINT("info",
|
|
("key_length: %ld %u", key_length,
|
|
(*prev_changed)->key_length));
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
else if (cmp == 0)
|
|
{
|
|
DBUG_PRINT("info", ("already in list"));
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
}
|
|
}
|
|
*prev_changed = changed_table_dup(key, key_length);
|
|
DBUG_PRINT("info", ("key_length: %ld %u", key_length,
|
|
(*prev_changed)->key_length));
|
|
DBUG_VOID_RETURN;
|
|
}
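
/*
  Note (added for clarity): the loop above keeps transaction.changed_tables
  ordered by key_length and then by memcmp() of the key, so a duplicate
  entry is detected in a single pass ("already in list") and new entries
  are spliced in via list_include().
*/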
|
|
|
|
|
|
CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length)
|
|
{
|
|
CHANGED_TABLE_LIST* new_table =
|
|
(CHANGED_TABLE_LIST*) trans_alloc(ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST))+
|
|
key_length + 1);
|
|
if (!new_table)
|
|
{
|
|
my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_FATALERROR),
|
|
ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1);
|
|
killed= KILL_CONNECTION;
|
|
return 0;
|
|
}
|
|
|
|
new_table->key= ((char*)new_table)+ ALIGN_SIZE(sizeof(CHANGED_TABLE_LIST));
|
|
new_table->next = 0;
|
|
new_table->key_length = key_length;
|
|
::memcpy(new_table->key, key, key_length);
|
|
return new_table;
|
|
}
|
|
|
|
|
|
int THD::send_explain_fields(select_result *result)
|
|
{
|
|
List<Item> field_list;
|
|
make_explain_field_list(field_list);
|
|
result->prepare(field_list, NULL);
|
|
return (result->send_result_set_metadata(field_list,
|
|
Protocol::SEND_NUM_ROWS |
|
|
Protocol::SEND_EOF));
|
|
}
|
|
|
|
|
|
/*
|
|
Populate the provided field_list with EXPLAIN output columns.
|
|
this->lex->describe has the EXPLAIN flags
|
|
*/
|
|
|
|
void THD::make_explain_field_list(List<Item> &field_list)
|
|
{
|
|
Item *item;
|
|
CHARSET_INFO *cs= system_charset_info;
|
|
field_list.push_back(item= new Item_return_int("id",3, MYSQL_TYPE_LONGLONG));
|
|
item->maybe_null= 1;
|
|
field_list.push_back(new Item_empty_string("select_type", 19, cs));
|
|
field_list.push_back(item= new Item_empty_string("table", NAME_CHAR_LEN, cs));
|
|
item->maybe_null= 1;
|
|
if (lex->describe & DESCRIBE_PARTITIONS)
|
|
{
|
|
/* Maximum length of string that make_used_partitions_str() can produce */
|
|
item= new Item_empty_string("partitions", MAX_PARTITIONS * (1 + FN_LEN),
|
|
cs);
|
|
field_list.push_back(item);
|
|
item->maybe_null= 1;
|
|
}
|
|
field_list.push_back(item= new Item_empty_string("type", 10, cs));
|
|
item->maybe_null= 1;
|
|
field_list.push_back(item=new Item_empty_string("possible_keys",
|
|
NAME_CHAR_LEN*MAX_KEY, cs));
|
|
item->maybe_null=1;
|
|
field_list.push_back(item=new Item_empty_string("key", NAME_CHAR_LEN, cs));
|
|
item->maybe_null=1;
|
|
field_list.push_back(item=new Item_empty_string("key_len",
|
|
NAME_CHAR_LEN*MAX_KEY));
|
|
item->maybe_null=1;
|
|
field_list.push_back(item=new Item_empty_string("ref",
|
|
NAME_CHAR_LEN*MAX_REF_PARTS,
|
|
cs));
|
|
item->maybe_null=1;
|
|
field_list.push_back(item= new Item_return_int("rows", 10,
|
|
MYSQL_TYPE_LONGLONG));
|
|
if (lex->describe & DESCRIBE_EXTENDED)
|
|
{
|
|
field_list.push_back(item= new Item_float("filtered", 0.1234, 2, 4));
|
|
item->maybe_null=1;
|
|
}
|
|
item->maybe_null= 1;
|
|
field_list.push_back(new Item_empty_string("Extra", 255, cs));
|
|
}
|
|
|
|
|
|
#ifdef SIGNAL_WITH_VIO_CLOSE
|
|
void THD::close_active_vio()
|
|
{
|
|
DBUG_ENTER("close_active_vio");
|
|
mysql_mutex_assert_owner(&LOCK_thd_data);
|
|
#ifndef EMBEDDED_LIBRARY
|
|
if (active_vio)
|
|
{
|
|
vio_close(active_vio);
|
|
active_vio = 0;
|
|
}
|
|
#endif
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
#endif
|
|
|
|
|
|
struct Item_change_record: public ilink
|
|
{
|
|
Item **place;
|
|
Item *old_value;
|
|
/* Placement new was hidden by `new' in ilink (TODO: check): */
|
|
static void *operator new(size_t size, void *mem) { return mem; }
|
|
static void operator delete(void *ptr, size_t size) {}
|
|
static void operator delete(void *ptr, void *mem) { /* never called */ }
|
|
};
|
|
|
|
|
|
/*
|
|
  Register an item tree transformation, performed by the query
|
|
optimizer. We need a pointer to runtime_memroot because it may be !=
|
|
thd->mem_root (due to possible set_n_backup_active_arena called for thd).
|
|
*/
|
|
|
|
void THD::nocheck_register_item_tree_change(Item **place, Item *old_value,
|
|
MEM_ROOT *runtime_memroot)
|
|
{
|
|
Item_change_record *change;
|
|
/*
|
|
Now we use one node per change, which adds some memory overhead,
|
|
but still is rather fast as we use alloc_root for allocations.
|
|
A list of item tree changes of an average query should be short.
|
|
*/
|
|
void *change_mem= alloc_root(runtime_memroot, sizeof(*change));
|
|
if (change_mem == 0)
|
|
{
|
|
/*
|
|
OOM, thd->fatal_error() is called by the error handler of the
|
|
memroot. Just return.
|
|
*/
|
|
return;
|
|
}
|
|
change= new (change_mem) Item_change_record;
|
|
change->place= place;
|
|
change->old_value= old_value;
|
|
change_list.append(change);
|
|
}
|
|
|
|
/**
|
|
Check and register item change if needed
|
|
|
|
@param place place where we should assign new value
|
|
@param new_value place of the new value
|
|
|
|
  @details
  Let C be a reference to an item that replaced the reference A at
  location (occurrence) L1, and let this change be registered.
  If C is substituted for reference A at another location (occurrence) L2
  that is to be registered as well, then this change has to be consistent
  with the first one, so that the procedure that rolls back the changes
  substitutes the same reference at both locations L1 and L2.
|
|
*/
|
|
|
|
void THD::check_and_register_item_tree_change(Item **place, Item **new_value,
|
|
MEM_ROOT *runtime_memroot)
|
|
{
|
|
Item_change_record *change;
|
|
I_List_iterator<Item_change_record> it(change_list);
|
|
while ((change= it++))
|
|
{
|
|
if (change->place == new_value)
|
|
break; // we need only very first value
|
|
}
|
|
if (change)
|
|
nocheck_register_item_tree_change(place, change->old_value,
|
|
runtime_memroot);
|
|
}
|
|
|
|
|
|
void THD::rollback_item_tree_changes()
|
|
{
|
|
I_List_iterator<Item_change_record> it(change_list);
|
|
Item_change_record *change;
|
|
DBUG_ENTER("rollback_item_tree_changes");
|
|
|
|
while ((change= it++))
|
|
*change->place= change->old_value;
|
|
/* We can forget about changes memory: it's allocated in runtime memroot */
|
|
change_list.empty();
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/*****************************************************************************
|
|
** Functions to provide an interface to select results
|
|
*****************************************************************************/
|
|
|
|
select_result::select_result()
|
|
{
|
|
thd=current_thd;
|
|
}
|
|
|
|
void select_result::cleanup()
|
|
{
|
|
/* do nothing */
|
|
}
|
|
|
|
bool select_result::check_simple_select() const
|
|
{
|
|
my_error(ER_SP_BAD_CURSOR_QUERY, MYF(0));
|
|
return TRUE;
|
|
}
|
|
|
|
|
|
static String default_line_term("\n",default_charset_info);
|
|
static String default_escaped("\\",default_charset_info);
|
|
static String default_field_term("\t",default_charset_info);
|
|
static String default_enclosed_and_line_start("", default_charset_info);
|
|
static String default_xml_row_term("<row>", default_charset_info);
|
|
|
|
sql_exchange::sql_exchange(char *name, bool flag,
                           enum enum_filetype filetype_arg)
  :file_name(name), opt_enclosed(0), dumpfile(flag), skip_lines(0)
{
  filetype= filetype_arg;
  field_term= &default_field_term;
  enclosed= line_start= &default_enclosed_and_line_start;
  line_term= filetype == FILETYPE_CSV ?
             &default_line_term : &default_xml_row_term;
  escaped= &default_escaped;
  cs= NULL;
}
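
/*
  Note (added for clarity): these defaults correspond to
  FIELDS TERMINATED BY '\t' ESCAPED BY '\\' with empty ENCLOSED BY and
  LINES STARTING BY '', terminated by '\n' for the CSV file type and by
  '<row>' otherwise (XML).
*/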
|
|
|
|
bool sql_exchange::escaped_given(void)
|
|
{
|
|
return escaped != &default_escaped;
|
|
}
|
|
|
|
|
|
bool select_send::send_result_set_metadata(List<Item> &list, uint flags)
|
|
{
|
|
bool res;
|
|
#ifdef WITH_WSREP
|
|
if (WSREP(thd) && thd->wsrep_retry_query)
|
|
{
|
|
WSREP_DEBUG("skipping select metadata");
|
|
return FALSE;
|
|
}
|
|
#endif /* WITH_WSREP */
|
|
if (!(res= thd->protocol->send_result_set_metadata(&list, flags)))
|
|
is_result_set_started= 1;
|
|
return res;
|
|
}
|
|
|
|
void select_send::abort_result_set()
|
|
{
|
|
DBUG_ENTER("select_send::abort_result_set");
|
|
|
|
if (is_result_set_started && thd->spcont)
|
|
{
|
|
/*
|
|
We're executing a stored procedure, have an open result
|
|
set and an SQL exception condition. In this situation we
|
|
must abort the current statement, silence the error and
|
|
start executing the continue/exit handler if one is found.
|
|
Before aborting the statement, let's end the open result set, as
|
|
otherwise the client will hang due to the violation of the
|
|
client/server protocol.
|
|
*/
|
|
thd->spcont->end_partial_result_set= TRUE;
|
|
}
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/**
|
|
Cleanup an instance of this class for re-use
|
|
at next execution of a prepared statement/
|
|
stored procedure statement.
|
|
*/
|
|
|
|
void select_send::cleanup()
|
|
{
|
|
is_result_set_started= FALSE;
|
|
}
|
|
|
|
/* Send data to client. Returns 0 if ok */
|
|
|
|
int select_send::send_data(List<Item> &items)
|
|
{
|
|
Protocol *protocol= thd->protocol;
|
|
DBUG_ENTER("select_send::send_data");
|
|
|
|
/* unit is not set when using 'delete ... returning' */
|
|
if (unit && unit->offset_limit_cnt)
|
|
{ // using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(FALSE);
|
|
}
|
|
if (thd->killed == ABORT_QUERY)
|
|
DBUG_RETURN(FALSE);
|
|
|
|
/*
|
|
We may be passing the control from mysqld to the client: release the
|
|
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
|
|
by thd
|
|
*/
|
|
ha_release_temporary_latches(thd);
|
|
|
|
protocol->prepare_for_resend();
|
|
if (protocol->send_result_set_row(&items))
|
|
{
|
|
protocol->remove_last_row();
|
|
DBUG_RETURN(TRUE);
|
|
}
|
|
|
|
thd->inc_sent_row_count(1);
|
|
|
|
if (thd->vio_ok())
|
|
DBUG_RETURN(protocol->write());
|
|
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
|
|
bool select_send::send_eof()
|
|
{
|
|
/*
|
|
We may be passing the control from mysqld to the client: release the
|
|
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
|
|
by thd
|
|
*/
|
|
ha_release_temporary_latches(thd);
|
|
|
|
/*
|
|
Don't send EOF if we're in error condition (which implies we've already
|
|
sent or are sending an error)
|
|
*/
|
|
if (thd->is_error())
|
|
return TRUE;
|
|
::my_eof(thd);
|
|
is_result_set_started= 0;
|
|
return FALSE;
|
|
}
|
|
|
|
|
|
/************************************************************************
|
|
Handling writing to file
|
|
************************************************************************/
|
|
|
|
bool select_to_file::send_eof()
|
|
{
|
|
int error= MY_TEST(end_io_cache(&cache));
|
|
if (mysql_file_close(file, MYF(MY_WME)) || thd->is_error())
|
|
error= true;
|
|
|
|
if (!error)
|
|
{
|
|
::my_ok(thd,row_count);
|
|
}
|
|
file= -1;
|
|
return error;
|
|
}
|
|
|
|
|
|
void select_to_file::cleanup()
|
|
{
|
|
  /* In case of error send_eof() may not be called: close the file here. */
|
|
if (file >= 0)
|
|
{
|
|
(void) end_io_cache(&cache);
|
|
mysql_file_close(file, MYF(0));
|
|
file= -1;
|
|
}
|
|
path[0]= '\0';
|
|
row_count= 0;
|
|
}
|
|
|
|
|
|
select_to_file::~select_to_file()
|
|
{
|
|
if (file >= 0)
|
|
{ // This only happens in case of error
|
|
(void) end_io_cache(&cache);
|
|
mysql_file_close(file, MYF(0));
|
|
file= -1;
|
|
}
|
|
}
|
|
|
|
/***************************************************************************
|
|
** Export of select to textfile
|
|
***************************************************************************/
|
|
|
|
select_export::~select_export()
|
|
{
|
|
thd->set_sent_row_count(row_count);
|
|
}
|
|
|
|
|
|
/*
|
|
Create file with IO cache
|
|
|
|
SYNOPSIS
|
|
create_file()
|
|
thd Thread handle
|
|
path File name
|
|
      exchange  Exchange class
|
|
cache IO cache
|
|
|
|
RETURN
|
|
>= 0 File handle
|
|
-1 Error
|
|
*/
|
|
|
|
|
|
static File create_file(THD *thd, char *path, sql_exchange *exchange,
|
|
IO_CACHE *cache)
|
|
{
|
|
File file;
|
|
uint option= MY_UNPACK_FILENAME | MY_RELATIVE_PATH;
|
|
|
|
#ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS
|
|
option|= MY_REPLACE_DIR; // Force use of db directory
|
|
#endif
|
|
|
|
if (!dirname_length(exchange->file_name))
|
|
{
|
|
strxnmov(path, FN_REFLEN-1, mysql_real_data_home, thd->db ? thd->db : "",
|
|
NullS);
|
|
(void) fn_format(path, exchange->file_name, path, "", option);
|
|
}
|
|
else
|
|
(void) fn_format(path, exchange->file_name, mysql_real_data_home, "", option);
|
|
|
|
if (!is_secure_file_path(path))
|
|
{
|
|
/* Write only allowed to dir or subdir specified by secure_file_priv */
|
|
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv");
|
|
return -1;
|
|
}
|
|
|
|
if (!access(path, F_OK))
|
|
{
|
|
my_error(ER_FILE_EXISTS_ERROR, MYF(0), exchange->file_name);
|
|
return -1;
|
|
}
|
|
/* Create the file world readable */
|
|
if ((file= mysql_file_create(key_select_to_file,
|
|
path, 0666, O_WRONLY|O_EXCL, MYF(MY_WME))) < 0)
|
|
return file;
|
|
#ifdef HAVE_FCHMOD
|
|
(void) fchmod(file, 0666); // Because of umask()
|
|
#else
|
|
(void) chmod(path, 0666);
|
|
#endif
|
|
if (init_io_cache(cache, file, 0L, WRITE_CACHE, 0L, 1, MYF(MY_WME)))
|
|
{
|
|
mysql_file_close(file, MYF(0));
|
|
/* Delete file on error, it was just created */
|
|
mysql_file_delete(key_select_to_file, path, MYF(0));
|
|
return -1;
|
|
}
|
|
return file;
|
|
}
|
|
|
|
|
|
int
|
|
select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
|
|
{
|
|
bool blob_flag=0;
|
|
bool string_results= FALSE, non_string_results= FALSE;
|
|
unit= u;
|
|
if ((uint) strlen(exchange->file_name) + NAME_LEN >= FN_REFLEN)
|
|
strmake_buf(path,exchange->file_name);
|
|
|
|
write_cs= exchange->cs ? exchange->cs : &my_charset_bin;
|
|
|
|
if ((file= create_file(thd, path, exchange, &cache)) < 0)
|
|
return 1;
|
|
  /* Check if there are any blobs in the data */
|
|
{
|
|
List_iterator_fast<Item> li(list);
|
|
Item *item;
|
|
while ((item=li++))
|
|
{
|
|
if (item->max_length >= MAX_BLOB_WIDTH)
|
|
{
|
|
blob_flag=1;
|
|
break;
|
|
}
|
|
if (item->result_type() == STRING_RESULT)
|
|
string_results= TRUE;
|
|
else
|
|
non_string_results= TRUE;
|
|
}
|
|
}
|
|
if (exchange->escaped->numchars() > 1 || exchange->enclosed->numchars() > 1)
|
|
{
|
|
my_error(ER_WRONG_FIELD_TERMINATORS, MYF(0));
|
|
return TRUE;
|
|
}
|
|
if (exchange->escaped->length() > 1 || exchange->enclosed->length() > 1 ||
|
|
!my_isascii(exchange->escaped->ptr()[0]) ||
|
|
!my_isascii(exchange->enclosed->ptr()[0]) ||
|
|
!exchange->field_term->is_ascii() || !exchange->line_term->is_ascii() ||
|
|
!exchange->line_start->is_ascii())
|
|
{
|
|
/*
|
|
      Current LOAD DATA INFILE recognizes field/line separators "as is" without
      converting from the client charset to the data file charset. So it is
      assumed that the input file of LOAD DATA INFILE consists of data in one
      charset and separators in another charset. For compatibility with that
      [buggy] behaviour the SELECT INTO OUTFILE implementation has been kept
      "as is" too, but a new warning message has been added:
|
|
|
|
Non-ASCII separator arguments are not fully supported
|
|
*/
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
|
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED,
|
|
ER(WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED));
|
|
}
|
|
field_term_length=exchange->field_term->length();
|
|
field_term_char= field_term_length ?
|
|
(int) (uchar) (*exchange->field_term)[0] : INT_MAX;
|
|
if (!exchange->line_term->length())
|
|
exchange->line_term=exchange->field_term; // Use this if it exists
|
|
field_sep_char= (exchange->enclosed->length() ?
|
|
(int) (uchar) (*exchange->enclosed)[0] : field_term_char);
|
|
if (exchange->escaped->length() && (exchange->escaped_given() ||
|
|
!(thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)))
|
|
escape_char= (int) (uchar) (*exchange->escaped)[0];
|
|
else
|
|
escape_char= -1;
|
|
is_ambiguous_field_sep= MY_TEST(strchr(ESCAPE_CHARS, field_sep_char));
|
|
is_unsafe_field_sep= MY_TEST(strchr(NUMERIC_CHARS, field_sep_char));
|
|
line_sep_char= (exchange->line_term->length() ?
|
|
(int) (uchar) (*exchange->line_term)[0] : INT_MAX);
|
|
if (!field_term_length)
|
|
exchange->opt_enclosed=0;
|
|
if (!exchange->enclosed->length())
|
|
exchange->opt_enclosed=1; // A little quicker loop
|
|
fixed_row_size= (!field_term_length && !exchange->enclosed->length() &&
|
|
!blob_flag);
|
|
if ((is_ambiguous_field_sep && exchange->enclosed->is_empty() &&
|
|
(string_results || is_unsafe_field_sep)) ||
|
|
(exchange->opt_enclosed && non_string_results &&
|
|
field_term_length && strchr(NUMERIC_CHARS, field_term_char)))
|
|
{
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
|
ER_AMBIGUOUS_FIELD_TERM, ER(ER_AMBIGUOUS_FIELD_TERM));
|
|
is_ambiguous_field_term= TRUE;
|
|
}
|
|
else
|
|
is_ambiguous_field_term= FALSE;
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
#define NEED_ESCAPING(x) ((int) (uchar) (x) == escape_char || \
                          (enclosed ? (int) (uchar) (x) == field_sep_char \
                                    : (int) (uchar) (x) == field_term_char) || \
                          (int) (uchar) (x) == line_sep_char || \
                          !(x))
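
/*
  Descriptive note (added): a byte needs escaping when it is the escape
  character itself, the active separator (field_sep_char when the value
  is enclosed, field_term_char otherwise), the line separator, or a NUL
  byte.
*/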
|
|
|
|
int select_export::send_data(List<Item> &items)
|
|
{
|
|
|
|
DBUG_ENTER("select_export::send_data");
|
|
char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH];
|
|
char cvt_buff[MAX_FIELD_WIDTH];
|
|
String cvt_str(cvt_buff, sizeof(cvt_buff), write_cs);
|
|
bool space_inited=0;
|
|
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
|
|
tmp.length(0);
|
|
|
|
if (unit->offset_limit_cnt)
|
|
{ // using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
if (thd->killed == ABORT_QUERY)
|
|
DBUG_RETURN(0);
|
|
row_count++;
|
|
Item *item;
|
|
uint used_length=0,items_left=items.elements;
|
|
List_iterator_fast<Item> li(items);
|
|
|
|
if (my_b_write(&cache,(uchar*) exchange->line_start->ptr(),
|
|
exchange->line_start->length()))
|
|
goto err;
|
|
while ((item=li++))
|
|
{
|
|
Item_result result_type=item->result_type();
|
|
bool enclosed = (exchange->enclosed->length() &&
|
|
(!exchange->opt_enclosed || result_type == STRING_RESULT));
|
|
res=item->str_result(&tmp);
|
|
if (res && !my_charset_same(write_cs, res->charset()) &&
|
|
!my_charset_same(write_cs, &my_charset_bin))
|
|
{
|
|
const char *well_formed_error_pos;
|
|
const char *cannot_convert_error_pos;
|
|
const char *from_end_pos;
|
|
const char *error_pos;
|
|
uint32 bytes;
|
|
uint64 estimated_bytes=
|
|
((uint64) res->length() / res->charset()->mbminlen + 1) *
|
|
write_cs->mbmaxlen + 1;
|
|
set_if_smaller(estimated_bytes, UINT_MAX32);
|
|
if (cvt_str.realloc((uint32) estimated_bytes))
|
|
{
|
|
my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), (uint32) estimated_bytes);
|
|
goto err;
|
|
}
|
|
|
|
bytes= well_formed_copy_nchars(write_cs, (char *) cvt_str.ptr(),
|
|
cvt_str.alloced_length(),
|
|
res->charset(), res->ptr(), res->length(),
|
|
UINT_MAX32, // copy all input chars,
|
|
// i.e. ignore nchars parameter
|
|
&well_formed_error_pos,
|
|
&cannot_convert_error_pos,
|
|
&from_end_pos);
|
|
error_pos= well_formed_error_pos ? well_formed_error_pos
|
|
: cannot_convert_error_pos;
|
|
if (error_pos)
|
|
{
|
|
char printable_buff[32];
|
|
convert_to_printable(printable_buff, sizeof(printable_buff),
|
|
error_pos, res->ptr() + res->length() - error_pos,
|
|
res->charset(), 6);
|
|
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
|
|
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
|
|
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
|
|
"string", printable_buff,
|
|
item->name, static_cast<long>(row_count));
|
|
}
|
|
else if (from_end_pos < res->ptr() + res->length())
|
|
{
|
|
/*
|
|
result is longer than UINT_MAX32 and doesn't fit into String
|
|
*/
|
|
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
|
|
WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED),
|
|
item->full_name(), static_cast<long>(row_count));
|
|
}
|
|
cvt_str.length(bytes);
|
|
res= &cvt_str;
|
|
}
|
|
if (res && enclosed)
|
|
{
|
|
if (my_b_write(&cache,(uchar*) exchange->enclosed->ptr(),
|
|
exchange->enclosed->length()))
|
|
goto err;
|
|
}
|
|
if (!res)
|
|
{ // NULL
|
|
if (!fixed_row_size)
|
|
{
|
|
if (escape_char != -1) // Use \N syntax
|
|
{
|
|
null_buff[0]=escape_char;
|
|
null_buff[1]='N';
|
|
if (my_b_write(&cache,(uchar*) null_buff,2))
|
|
goto err;
|
|
}
|
|
else if (my_b_write(&cache,(uchar*) "NULL",4))
|
|
goto err;
|
|
}
|
|
else
|
|
{
|
|
used_length=0; // Fill with space
|
|
}
|
|
}
|
|
else
|
|
{
|
|
if (fixed_row_size)
|
|
used_length=MY_MIN(res->length(),item->max_length);
|
|
else
|
|
used_length=res->length();
|
|
if ((result_type == STRING_RESULT || is_unsafe_field_sep) &&
|
|
escape_char != -1)
|
|
{
|
|
char *pos, *start, *end;
|
|
CHARSET_INFO *res_charset= res->charset();
|
|
CHARSET_INFO *character_set_client= thd->variables.
|
|
character_set_client;
|
|
bool check_second_byte= (res_charset == &my_charset_bin) &&
|
|
character_set_client->
|
|
escape_with_backslash_is_dangerous;
|
|
DBUG_ASSERT(character_set_client->mbmaxlen == 2 ||
|
|
!character_set_client->escape_with_backslash_is_dangerous);
|
|
for (start=pos=(char*) res->ptr(),end=pos+used_length ;
|
|
pos != end ;
|
|
pos++)
|
|
{
|
|
#ifdef USE_MB
|
|
if (use_mb(res_charset))
|
|
{
|
|
int l;
|
|
if ((l=my_ismbchar(res_charset, pos, end)))
|
|
{
|
|
pos += l-1;
|
|
continue;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
Special case when dumping BINARY/VARBINARY/BLOB values
|
|
for the clients with character sets big5, cp932, gbk and sjis,
|
|
which can have the escape character (0x5C "\" by default)
|
|
as the second byte of a multi-byte sequence.
|
|
|
|
If
|
|
- pos[0] is a valid multi-byte head (e.g 0xEE) and
|
|
- pos[1] is 0x00, which will be escaped as "\0",
|
|
|
|
then we'll get "0xEE + 0x5C + 0x30" in the output file.
|
|
|
|
If this file is later loaded using this sequence of commands:
|
|
|
|
mysql> create table t1 (a varchar(128)) character set big5;
|
|
mysql> LOAD DATA INFILE 'dump.txt' INTO TABLE t1;
|
|
|
|
then 0x5C will be misinterpreted as the second byte
|
|
of a multi-byte character "0xEE + 0x5C", instead of
|
|
escape character for 0x00.
|
|
|
|
To avoid this confusion, we'll escape the multi-byte
|
|
head character too, so the sequence "0xEE + 0x00" will be
|
|
dumped as "0x5C + 0xEE + 0x5C + 0x30".
|
|
|
|
Note, in the condition below we only check if
|
|
mbcharlen is equal to 2, because there are no
|
|
character sets with mbmaxlen longer than 2
|
|
and with escape_with_backslash_is_dangerous set.
|
|
DBUG_ASSERT before the loop makes that sure.
|
|
*/
|
|
|
|
if ((NEED_ESCAPING(*pos) ||
|
|
(check_second_byte &&
|
|
my_mbcharlen(character_set_client, (uchar) *pos) == 2 &&
|
|
pos + 1 < end &&
|
|
NEED_ESCAPING(pos[1]))) &&
|
|
/*
|
|
Don't escape field_term_char by doubling - doubling is only
|
|
valid for ENCLOSED BY characters:
|
|
*/
|
|
(enclosed || !is_ambiguous_field_term ||
|
|
(int) (uchar) *pos != field_term_char))
|
|
{
|
|
char tmp_buff[2];
|
|
tmp_buff[0]= ((int) (uchar) *pos == field_sep_char &&
|
|
is_ambiguous_field_sep) ?
|
|
field_sep_char : escape_char;
|
|
tmp_buff[1]= *pos ? *pos : '0';
|
|
if (my_b_write(&cache,(uchar*) start,(uint) (pos-start)) ||
|
|
my_b_write(&cache,(uchar*) tmp_buff,2))
|
|
goto err;
|
|
start=pos+1;
|
|
}
|
|
}
|
|
if (my_b_write(&cache,(uchar*) start,(uint) (pos-start)))
|
|
goto err;
|
|
}
|
|
else if (my_b_write(&cache,(uchar*) res->ptr(),used_length))
|
|
goto err;
|
|
}
|
|
if (fixed_row_size)
|
|
{ // Fill with space
|
|
if (item->max_length > used_length)
|
|
{
|
|
if (!space_inited)
|
|
{
|
|
space_inited=1;
|
|
bfill(space,sizeof(space),' ');
|
|
}
|
|
uint length=item->max_length-used_length;
|
|
for (; length > sizeof(space) ; length-=sizeof(space))
|
|
{
|
|
if (my_b_write(&cache,(uchar*) space,sizeof(space)))
|
|
goto err;
|
|
}
|
|
if (my_b_write(&cache,(uchar*) space,length))
|
|
goto err;
|
|
}
|
|
}
|
|
if (res && enclosed)
|
|
{
|
|
if (my_b_write(&cache, (uchar*) exchange->enclosed->ptr(),
|
|
exchange->enclosed->length()))
|
|
goto err;
|
|
}
|
|
if (--items_left)
|
|
{
|
|
if (my_b_write(&cache, (uchar*) exchange->field_term->ptr(),
|
|
field_term_length))
|
|
goto err;
|
|
}
|
|
}
|
|
if (my_b_write(&cache,(uchar*) exchange->line_term->ptr(),
|
|
exchange->line_term->length()))
|
|
goto err;
|
|
DBUG_RETURN(0);
|
|
err:
|
|
DBUG_RETURN(1);
|
|
}
|
|
|
|
|
|
/***************************************************************************
|
|
** Dump of select to a binary file
|
|
***************************************************************************/
|
|
|
|
|
|
int
|
|
select_dump::prepare(List<Item> &list __attribute__((unused)),
|
|
SELECT_LEX_UNIT *u)
|
|
{
|
|
unit= u;
|
|
return (int) ((file= create_file(thd, path, exchange, &cache)) < 0);
|
|
}
|
|
|
|
|
|
int select_dump::send_data(List<Item> &items)
|
|
{
|
|
List_iterator_fast<Item> li(items);
|
|
char buff[MAX_FIELD_WIDTH];
|
|
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
|
|
tmp.length(0);
|
|
Item *item;
|
|
DBUG_ENTER("select_dump::send_data");
|
|
|
|
if (unit->offset_limit_cnt)
|
|
{ // using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
if (thd->killed == ABORT_QUERY)
|
|
DBUG_RETURN(0);
|
|
|
|
if (row_count++ > 1)
|
|
{
|
|
my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
|
|
goto err;
|
|
}
|
|
while ((item=li++))
|
|
{
|
|
res=item->str_result(&tmp);
|
|
if (!res) // If NULL
|
|
{
|
|
if (my_b_write(&cache,(uchar*) "",1))
|
|
goto err;
|
|
}
|
|
else if (my_b_write(&cache,(uchar*) res->ptr(),res->length()))
|
|
{
|
|
my_error(ER_ERROR_ON_WRITE, MYF(0), path, my_errno);
|
|
goto err;
|
|
}
|
|
}
|
|
DBUG_RETURN(0);
|
|
err:
|
|
DBUG_RETURN(1);
|
|
}
|
|
|
|
|
|
select_subselect::select_subselect(Item_subselect *item_arg)
|
|
{
|
|
item= item_arg;
|
|
}
|
|
|
|
|
|
int select_singlerow_subselect::send_data(List<Item> &items)
|
|
{
|
|
DBUG_ENTER("select_singlerow_subselect::send_data");
|
|
Item_singlerow_subselect *it= (Item_singlerow_subselect *)item;
|
|
if (it->assigned())
|
|
{
|
|
my_message(ER_SUBQUERY_NO_1_ROW, ER(ER_SUBQUERY_NO_1_ROW),
|
|
MYF(current_thd->lex->ignore ? ME_JUST_WARNING : 0));
|
|
DBUG_RETURN(1);
|
|
}
|
|
if (unit->offset_limit_cnt)
|
|
{ // Using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
if (thd->killed == ABORT_QUERY)
|
|
DBUG_RETURN(0);
|
|
List_iterator_fast<Item> li(items);
|
|
Item *val_item;
|
|
for (uint i= 0; (val_item= li++); i++)
|
|
it->store(i, val_item);
|
|
it->assigned(1);
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
|
|
void select_max_min_finder_subselect::cleanup()
|
|
{
|
|
DBUG_ENTER("select_max_min_finder_subselect::cleanup");
|
|
cache= 0;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
int select_max_min_finder_subselect::send_data(List<Item> &items)
|
|
{
|
|
DBUG_ENTER("select_max_min_finder_subselect::send_data");
|
|
Item_maxmin_subselect *it= (Item_maxmin_subselect *)item;
|
|
List_iterator_fast<Item> li(items);
|
|
Item *val_item= li++;
|
|
it->register_value();
|
|
if (it->assigned())
|
|
{
|
|
cache->store(val_item);
|
|
if ((this->*op)())
|
|
it->store(0, cache);
|
|
}
|
|
else
|
|
{
|
|
if (!cache)
|
|
{
|
|
cache= Item_cache::get_cache(val_item);
|
|
switch (val_item->result_type()) {
|
|
case REAL_RESULT:
|
|
op= &select_max_min_finder_subselect::cmp_real;
|
|
break;
|
|
case INT_RESULT:
|
|
op= &select_max_min_finder_subselect::cmp_int;
|
|
break;
|
|
case STRING_RESULT:
|
|
op= &select_max_min_finder_subselect::cmp_str;
|
|
break;
|
|
case DECIMAL_RESULT:
|
|
op= &select_max_min_finder_subselect::cmp_decimal;
|
|
break;
|
|
case ROW_RESULT:
|
|
case TIME_RESULT:
|
|
case IMPOSSIBLE_RESULT:
|
|
        // This case should never be chosen
|
|
DBUG_ASSERT(0);
|
|
op= 0;
|
|
}
|
|
}
|
|
cache->store(val_item);
|
|
it->store(0, cache);
|
|
}
|
|
it->assigned(1);
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
bool select_max_min_finder_subselect::cmp_real()
|
|
{
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
|
double val1= cache->val_real(), val2= maxmin->val_real();
|
|
|
|
/* Ignore NULLs for ANY and keep them for ALL subqueries */
|
|
if (cache->null_value)
|
|
return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
|
|
if (maxmin->null_value)
|
|
return !is_all;
|
|
|
|
if (fmax)
|
|
return(val1 > val2);
|
|
return (val1 < val2);
|
|
}
|
|
|
|
bool select_max_min_finder_subselect::cmp_int()
|
|
{
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
|
longlong val1= cache->val_int(), val2= maxmin->val_int();
|
|
|
|
/* Ignore NULLs for ANY and keep them for ALL subqueries */
|
|
if (cache->null_value)
|
|
return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
|
|
if (maxmin->null_value)
|
|
return !is_all;
|
|
|
|
if (fmax)
|
|
return(val1 > val2);
|
|
return (val1 < val2);
|
|
}
|
|
|
|
bool select_max_min_finder_subselect::cmp_decimal()
|
|
{
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
|
my_decimal cval, *cvalue= cache->val_decimal(&cval);
|
|
my_decimal mval, *mvalue= maxmin->val_decimal(&mval);
|
|
|
|
/* Ignore NULLs for ANY and keep them for ALL subqueries */
|
|
if (cache->null_value)
|
|
return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
|
|
if (maxmin->null_value)
|
|
return !is_all;
|
|
|
|
if (fmax)
|
|
return (my_decimal_cmp(cvalue, mvalue) > 0) ;
|
|
return (my_decimal_cmp(cvalue,mvalue) < 0);
|
|
}
|
|
|
|
bool select_max_min_finder_subselect::cmp_str()
|
|
{
|
|
String *val1, *val2, buf1, buf2;
|
|
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
|
|
  /*
    Since both operands are Item_cache objects, buf1 & buf2 will not be
    used, but they are passed for safety.
  */
|
|
val1= cache->val_str(&buf1);
|
|
val2= maxmin->val_str(&buf1);
|
|
|
|
/* Ignore NULLs for ANY and keep them for ALL subqueries */
|
|
if (cache->null_value)
|
|
return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
|
|
if (maxmin->null_value)
|
|
return !is_all;
|
|
|
|
if (fmax)
|
|
return (sortcmp(val1, val2, cache->collation.collation) > 0) ;
|
|
return (sortcmp(val1, val2, cache->collation.collation) < 0);
|
|
}
|
|
|
|
int select_exists_subselect::send_data(List<Item> &items)
|
|
{
|
|
DBUG_ENTER("select_exists_subselect::send_data");
|
|
Item_exists_subselect *it= (Item_exists_subselect *)item;
|
|
if (unit->offset_limit_cnt)
|
|
{ // Using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
if (thd->killed == ABORT_QUERY)
|
|
DBUG_RETURN(0);
|
|
it->value= 1;
|
|
it->assigned(1);
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
|
|
/***************************************************************************
|
|
Dump of select to variables
|
|
***************************************************************************/
|
|
|
|
int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
|
|
{
|
|
unit= u;
|
|
|
|
if (var_list.elements != list.elements)
|
|
{
|
|
my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT,
|
|
ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0));
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
|
|
bool select_dumpvar::check_simple_select() const
|
|
{
|
|
my_error(ER_SP_BAD_CURSOR_SELECT, MYF(0));
|
|
return TRUE;
|
|
}
|
|
|
|
|
|
void select_dumpvar::cleanup()
|
|
{
|
|
row_count= 0;
|
|
}
|
|
|
|
|
|
Query_arena::Type Query_arena::type() const
|
|
{
|
|
DBUG_ASSERT(0); /* Should never be called */
|
|
return STATEMENT;
|
|
}
|
|
|
|
|
|
void Query_arena::free_items()
|
|
{
|
|
Item *next;
|
|
DBUG_ENTER("Query_arena::free_items");
|
|
/* This works because items are allocated with sql_alloc() */
|
|
for (; free_list; free_list= next)
|
|
{
|
|
next= free_list->next;
|
|
DBUG_ASSERT(free_list != next);
|
|
free_list->delete_self();
|
|
}
|
|
/* Postcondition: free_list is 0 */
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void Query_arena::set_query_arena(Query_arena *set)
|
|
{
|
|
mem_root= set->mem_root;
|
|
free_list= set->free_list;
|
|
state= set->state;
|
|
}
|
|
|
|
|
|
void Query_arena::cleanup_stmt()
|
|
{
|
|
DBUG_ASSERT(! "Query_arena::cleanup_stmt() not implemented");
|
|
}
|
|
|
|
/*
|
|
Statement functions
|
|
*/
|
|
|
|
Statement::Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg,
|
|
enum enum_state state_arg, ulong id_arg)
|
|
:Query_arena(mem_root_arg, state_arg),
|
|
id(id_arg),
|
|
mark_used_columns(MARK_COLUMNS_READ),
|
|
lex(lex_arg),
|
|
db(NULL),
|
|
db_length(0)
|
|
{
|
|
name.str= NULL;
|
|
}
|
|
|
|
|
|
Query_arena::Type Statement::type() const
|
|
{
|
|
return STATEMENT;
|
|
}
|
|
|
|
|
|
void Statement::set_statement(Statement *stmt)
|
|
{
|
|
id= stmt->id;
|
|
mark_used_columns= stmt->mark_used_columns;
|
|
lex= stmt->lex;
|
|
query_string= stmt->query_string;
|
|
}
|
|
|
|
|
|
void
|
|
Statement::set_n_backup_statement(Statement *stmt, Statement *backup)
|
|
{
|
|
DBUG_ENTER("Statement::set_n_backup_statement");
|
|
backup->set_statement(this);
|
|
set_statement(stmt);
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void Statement::restore_backup_statement(Statement *stmt, Statement *backup)
|
|
{
|
|
DBUG_ENTER("Statement::restore_backup_statement");
|
|
stmt->set_statement(this);
|
|
set_statement(backup);
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void THD::end_statement()
|
|
{
|
|
DBUG_ENTER("THD::end_statement");
|
|
/* Cleanup SQL processing state to reuse this statement in next query. */
|
|
lex_end(lex);
|
|
delete lex->result;
|
|
lex->result= 0;
|
|
/* Note that free_list is freed in cleanup_after_query() */
|
|
|
|
/*
|
|
Don't free mem_root, as mem_root is freed in the end of dispatch_command
|
|
(once for any command).
|
|
*/
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/*
|
|
Start using arena specified by @set. Current arena data will be saved to
|
|
*backup.
|
|
*/
|
|
void THD::set_n_backup_active_arena(Query_arena *set, Query_arena *backup)
|
|
{
|
|
DBUG_ENTER("THD::set_n_backup_active_arena");
|
|
DBUG_ASSERT(backup->is_backup_arena == FALSE);
|
|
|
|
backup->set_query_arena(this);
|
|
set_query_arena(set);
|
|
#ifndef DBUG_OFF
|
|
backup->is_backup_arena= TRUE;
|
|
#endif
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
/*
|
|
Stop using the temporary arena, and start again using the arena that is
|
|
specified in *backup.
|
|
The temporary arena is returned back into *set.
|
|
*/
|
|
|
|
void THD::restore_active_arena(Query_arena *set, Query_arena *backup)
|
|
{
|
|
DBUG_ENTER("THD::restore_active_arena");
|
|
DBUG_ASSERT(backup->is_backup_arena);
|
|
set->set_query_arena(this);
|
|
set_query_arena(backup);
|
|
#ifndef DBUG_OFF
|
|
backup->is_backup_arena= FALSE;
|
|
#endif
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
Statement::~Statement()
|
|
{
|
|
}
|
|
|
|
C_MODE_START
|
|
|
|
static uchar *
|
|
get_statement_id_as_hash_key(const uchar *record, size_t *key_length,
|
|
my_bool not_used __attribute__((unused)))
|
|
{
|
|
const Statement *statement= (const Statement *) record;
|
|
*key_length= sizeof(statement->id);
|
|
return (uchar *) &((const Statement *) statement)->id;
|
|
}
|
|
|
|
static void delete_statement_as_hash_key(void *key)
|
|
{
|
|
delete (Statement *) key;
|
|
}
|
|
|
|
static uchar *get_stmt_name_hash_key(Statement *entry, size_t *length,
|
|
my_bool not_used __attribute__((unused)))
|
|
{
|
|
*length= entry->name.length;
|
|
return (uchar*) entry->name.str;
|
|
}
|
|
|
|
C_MODE_END
|
|
|
|
Statement_map::Statement_map() :
|
|
last_found_statement(0)
|
|
{
|
|
enum
|
|
{
|
|
START_STMT_HASH_SIZE = 16,
|
|
START_NAME_HASH_SIZE = 16
|
|
};
|
|
my_hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0,
|
|
get_statement_id_as_hash_key,
|
|
delete_statement_as_hash_key, MYF(0));
|
|
my_hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
|
|
(my_hash_get_key) get_stmt_name_hash_key,
|
|
NULL,MYF(0));
|
|
}
|
|
|
|
|
|
/*
|
|
Insert a new statement to the thread-local statement map.
|
|
|
|
DESCRIPTION
|
|
If there was an old statement with the same name, replace it with the
|
|
new one. Otherwise, check if max_prepared_stmt_count is not reached yet,
|
|
increase prepared_stmt_count, and insert the new statement. It's okay
|
|
to delete an old statement and fail to insert the new one.
|
|
|
|
POSTCONDITIONS
|
|
All named prepared statements are also present in names_hash.
|
|
Statement names in names_hash are unique.
|
|
    The statement is added only if prepared_stmt_count < max_prepared_stmt_count
|
|
last_found_statement always points to a valid statement or is 0
|
|
|
|
RETURN VALUE
|
|
0 success
|
|
1 error: out of resources or max_prepared_stmt_count limit has been
|
|
reached. An error is sent to the client, the statement is deleted.
|
|
*/
|
|
|
|
int Statement_map::insert(THD *thd, Statement *statement)
|
|
{
|
|
if (my_hash_insert(&st_hash, (uchar*) statement))
|
|
{
|
|
/*
|
|
Delete is needed only in case of an insert failure. In all other
|
|
cases hash_delete will also delete the statement.
|
|
*/
|
|
delete statement;
|
|
my_error(ER_OUT_OF_RESOURCES, MYF(0));
|
|
goto err_st_hash;
|
|
}
|
|
if (statement->name.str && my_hash_insert(&names_hash, (uchar*) statement))
|
|
{
|
|
my_error(ER_OUT_OF_RESOURCES, MYF(0));
|
|
goto err_names_hash;
|
|
}
|
|
mysql_mutex_lock(&LOCK_prepared_stmt_count);
|
|
/*
|
|
We don't check that prepared_stmt_count is <= max_prepared_stmt_count
|
|
because we would like to allow to lower the total limit
|
|
of prepared statements below the current count. In that case
|
|
no new statements can be added until prepared_stmt_count drops below
|
|
the limit.
|
|
*/
|
|
if (prepared_stmt_count >= max_prepared_stmt_count)
|
|
{
|
|
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
my_error(ER_MAX_PREPARED_STMT_COUNT_REACHED, MYF(0),
|
|
max_prepared_stmt_count);
|
|
goto err_max;
|
|
}
|
|
prepared_stmt_count++;
|
|
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
|
|
last_found_statement= statement;
|
|
return 0;
|
|
|
|
err_max:
|
|
if (statement->name.str)
|
|
my_hash_delete(&names_hash, (uchar*) statement);
|
|
err_names_hash:
|
|
my_hash_delete(&st_hash, (uchar*) statement);
|
|
err_st_hash:
|
|
return 1;
|
|
}
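
/*
  Note (added for clarity): on failure the code above unwinds the partial
  insertions in reverse order (err_max -> err_names_hash -> err_st_hash).
  Removing the entry from st_hash invokes delete_statement_as_hash_key(),
  so the statement object is freed exactly once, matching the RETURN VALUE
  contract documented above.
*/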
|
|
|
|
|
|
void Statement_map::close_transient_cursors()
|
|
{
|
|
#ifdef TO_BE_IMPLEMENTED
|
|
Statement *stmt;
|
|
while ((stmt= transient_cursor_list.head()))
|
|
stmt->close_cursor(); /* deletes itself from the list */
|
|
#endif
|
|
}
|
|
|
|
|
|
void Statement_map::erase(Statement *statement)
|
|
{
|
|
if (statement == last_found_statement)
|
|
last_found_statement= 0;
|
|
if (statement->name.str)
|
|
my_hash_delete(&names_hash, (uchar *) statement);
|
|
|
|
my_hash_delete(&st_hash, (uchar *) statement);
|
|
mysql_mutex_lock(&LOCK_prepared_stmt_count);
|
|
DBUG_ASSERT(prepared_stmt_count > 0);
|
|
prepared_stmt_count--;
|
|
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
}
|
|
|
|
|
|
void Statement_map::reset()
|
|
{
|
|
/* Must be first, hash_free will reset st_hash.records */
|
|
mysql_mutex_lock(&LOCK_prepared_stmt_count);
|
|
DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
|
|
prepared_stmt_count-= st_hash.records;
|
|
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
|
|
my_hash_reset(&names_hash);
|
|
my_hash_reset(&st_hash);
|
|
last_found_statement= 0;
|
|
}
|
|
|
|
|
|
Statement_map::~Statement_map()
|
|
{
|
|
/* Must go first, hash_free will reset st_hash.records */
|
|
mysql_mutex_lock(&LOCK_prepared_stmt_count);
|
|
DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
|
|
prepared_stmt_count-= st_hash.records;
|
|
mysql_mutex_unlock(&LOCK_prepared_stmt_count);
|
|
|
|
my_hash_free(&names_hash);
|
|
my_hash_free(&st_hash);
|
|
}
|
|
|
|
int select_dumpvar::send_data(List<Item> &items)
|
|
{
|
|
List_iterator_fast<my_var> var_li(var_list);
|
|
List_iterator<Item> it(items);
|
|
Item *item;
|
|
my_var *mv;
|
|
DBUG_ENTER("select_dumpvar::send_data");
|
|
|
|
if (unit->offset_limit_cnt)
|
|
{ // using limit offset,count
|
|
unit->offset_limit_cnt--;
|
|
DBUG_RETURN(0);
|
|
}
|
|
if (row_count++)
|
|
{
|
|
my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0));
|
|
DBUG_RETURN(1);
|
|
}
|
|
while ((mv= var_li++) && (item= it++))
|
|
{
|
|
if (mv->local)
|
|
{
|
|
if (thd->spcont->set_variable(thd, mv->offset, &item))
|
|
DBUG_RETURN(1);
|
|
}
|
|
else
|
|
{
|
|
Item_func_set_user_var *suv= new Item_func_set_user_var(mv->s, item);
|
|
suv->save_item_result(item);
|
|
if (suv->fix_fields(thd, 0))
|
|
DBUG_RETURN (1);
|
|
if (suv->update())
|
|
DBUG_RETURN (1);
|
|
}
|
|
}
|
|
DBUG_RETURN(thd->is_error());
|
|
}
|
|
|
|
bool select_dumpvar::send_eof()
|
|
{
|
|
if (! row_count)
|
|
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
|
ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA));
|
|
/*
|
|
Don't send EOF if we're in error condition (which implies we've already
|
|
sent or are sending an error)
|
|
*/
|
|
if (thd->is_error())
|
|
return true;
|
|
|
|
::my_ok(thd,row_count);
|
|
return 0;
|
|
}
|
|
|
|
|
|
bool
|
|
select_materialize_with_stats::
|
|
create_result_table(THD *thd_arg, List<Item> *column_types,
|
|
bool is_union_distinct, ulonglong options,
|
|
const char *table_alias, bool bit_fields_as_long,
|
|
bool create_table,
|
|
bool keep_row_order)
|
|
{
|
|
DBUG_ASSERT(table == 0);
|
|
tmp_table_param.field_count= column_types->elements;
|
|
tmp_table_param.bit_fields_as_long= bit_fields_as_long;
|
|
|
|
if (! (table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
|
|
(ORDER*) 0, is_union_distinct, 1,
|
|
options, HA_POS_ERROR, (char*) table_alias,
|
|
keep_row_order)))
|
|
return TRUE;
|
|
|
|
col_stat= (Column_statistics*) table->in_use->alloc(table->s->fields *
|
|
sizeof(Column_statistics));
|
|
if (!col_stat)
|
|
return TRUE;
|
|
|
|
reset();
|
|
table->file->extra(HA_EXTRA_WRITE_CACHE);
|
|
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
|
|
return FALSE;
|
|
}
|
|
|
|
|
|
void select_materialize_with_stats::reset()
|
|
{
|
|
memset(col_stat, 0, table->s->fields * sizeof(Column_statistics));
|
|
max_nulls_in_row= 0;
|
|
count_rows= 0;
|
|
}
|
|
|
|
|
|
void select_materialize_with_stats::cleanup()
|
|
{
|
|
reset();
|
|
select_union::cleanup();
|
|
}
|
|
|
|
|
|
/**
|
|
Override select_union::send_data to analyze each row for NULLs and to
|
|
update null_statistics before sending data to the client.
|
|
|
|
@return TRUE if fatal error when sending data to the client
|
|
@return FALSE on success
|
|
*/
|
|
|
|
int select_materialize_with_stats::send_data(List<Item> &items)
|
|
{
|
|
List_iterator_fast<Item> item_it(items);
|
|
Item *cur_item;
|
|
Column_statistics *cur_col_stat= col_stat;
|
|
uint nulls_in_row= 0;
|
|
int res;
|
|
|
|
if ((res= select_union::send_data(items)))
|
|
return res;
|
|
if (table->null_catch_flags & REJECT_ROW_DUE_TO_NULL_FIELDS)
|
|
{
|
|
table->null_catch_flags&= ~REJECT_ROW_DUE_TO_NULL_FIELDS;
|
|
return 0;
|
|
}
|
|
/* Skip duplicate rows. */
|
|
if (write_err == HA_ERR_FOUND_DUPP_KEY ||
|
|
write_err == HA_ERR_FOUND_DUPP_UNIQUE)
|
|
return 0;
|
|
|
|
++count_rows;
|
|
|
|
while ((cur_item= item_it++))
|
|
{
|
|
if (cur_item->is_null_result())
|
|
{
|
|
++cur_col_stat->null_count;
|
|
cur_col_stat->max_null_row= count_rows;
|
|
if (!cur_col_stat->min_null_row)
|
|
cur_col_stat->min_null_row= count_rows;
|
|
++nulls_in_row;
|
|
}
|
|
++cur_col_stat;
|
|
}
|
|
if (nulls_in_row > max_nulls_in_row)
|
|
max_nulls_in_row= nulls_in_row;
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
/****************************************************************************
|
|
TMP_TABLE_PARAM
|
|
****************************************************************************/
|
|
|
|
void TMP_TABLE_PARAM::init()
|
|
{
|
|
DBUG_ENTER("TMP_TABLE_PARAM::init");
|
|
DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this));
|
|
field_count= sum_func_count= func_count= hidden_field_count= 0;
|
|
group_parts= group_length= group_null_parts= 0;
|
|
quick_group= 1;
|
|
table_charset= 0;
|
|
precomputed_group_by= 0;
|
|
bit_fields_as_long= 0;
|
|
materialized_subquery= 0;
|
|
force_not_null_cols= 0;
|
|
skip_create_table= 0;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void thd_increment_bytes_sent(ulong length)
|
|
{
|
|
THD *thd=current_thd;
|
|
if (likely(thd != 0))
|
|
{
|
|
/* current_thd == 0 when close_connection() calls net_send_error() */
|
|
thd->status_var.bytes_sent+= length;
|
|
}
|
|
}
|
|
|
|
|
|
void thd_increment_bytes_received(ulong length)
|
|
{
|
|
current_thd->status_var.bytes_received+= length;
|
|
}
|
|
|
|
|
|
void thd_increment_net_big_packet_count(ulong length)
|
|
{
|
|
current_thd->status_var.net_big_packet_count+= length;
|
|
}
|
|
|
|
|
|
void THD::set_status_var_init()
|
|
{
|
|
bzero((char*) &status_var, offsetof(STATUS_VAR,
|
|
last_cleared_system_status_var));
|
|
}
|
|
|
|
|
|
void Security_context::init()
|
|
{
|
|
host= user= ip= external_user= 0;
|
|
host_or_ip= "connecting host";
|
|
priv_user[0]= priv_host[0]= proxy_user[0]= priv_role[0]= '\0';
|
|
master_access= 0;
|
|
#ifndef NO_EMBEDDED_ACCESS_CHECKS
|
|
db_access= NO_ACCESS;
|
|
#endif
|
|
}
|
|
|
|
|
|
void Security_context::destroy()
|
|
{
|
|
DBUG_PRINT("info", ("freeing security context"));
|
|
// If not pointer to constant
|
|
if (host != my_localhost)
|
|
{
|
|
my_free(host);
|
|
host= NULL;
|
|
}
|
|
if (user != delayed_user)
|
|
{
|
|
my_free(user);
|
|
user= NULL;
|
|
}
|
|
|
|
if (external_user)
|
|
{
|
|
my_free(external_user);
|
|
external_user= NULL;
|
|
}
|
|
|
|
my_free(ip);
|
|
ip= NULL;
|
|
}
|
|
|
|
|
|
void Security_context::skip_grants()
|
|
{
|
|
/* privileges for the user are unknown; everything is allowed */
|
|
host_or_ip= (char *)"";
|
|
master_access= ~NO_ACCESS;
|
|
*priv_user= *priv_host= '\0';
|
|
}
|
|
|
|
|
|
bool Security_context::set_user(char *user_arg)
|
|
{
|
|
my_free(user);
|
|
user= my_strdup(user_arg, MYF(0));
|
|
return user == 0;
|
|
}
|
|
|
|
#ifndef NO_EMBEDDED_ACCESS_CHECKS
|
|
/**
|
|
Initialize this security context from the passed in credentials
|
|
and activate it in the current thread.
|
|
|
|
@param thd
|
|
@param definer_user
|
|
@param definer_host
|
|
@param db
|
|
@param[out] backup Save a pointer to the current security context
|
|
in the thread. In case of success it points to the
|
|
saved old context, otherwise it points to NULL.
|
|
|
|
|
|
During execution of a statement, multiple security contexts may
|
|
be needed:
|
|
- the security context of the authenticated user, used as the
|
|
default security context for all top-level statements
|
|
- in case of a view or a stored program, possibly the security
|
|
context of the definer of the routine, if the object is
|
|
defined with SQL SECURITY DEFINER option.
|
|
|
|
The currently "active" security context is parameterized in THD
|
|
member security_ctx. By default, after a connection is
|
|
established, this member points at the "main" security context
|
|
- the credentials of the authenticated user.
|
|
|
|
Later, if we would like to execute some sub-statement or a part
|
|
of a statement under credentials of a different user, e.g.
|
|
definer of a procedure, we authenticate this user in a local
|
|
instance of Security_context by means of this method (and
|
|
ultimately by means of acl_getroot), and make the
|
|
local instance active in the thread by re-setting
|
|
thd->security_ctx pointer.
|
|
|
|
Note that the life cycle and memory management of the "main" and
|
|
temporary security contexts are different.
|
|
For the main security context, the memory for user/host/ip is
|
|
allocated on system heap, and the THD class frees this memory in
|
|
its destructor. The only case when contents of the main security
|
|
context may change during its life time is when someone issued
|
|
CHANGE USER command.
|
|
Memory management of a "temporary" security context is
|
|
the responsibility of the module that creates it.
|
|
|
|
@retval TRUE there is no user with the given credentials. The error
|
|
is reported in the thread.
|
|
@retval FALSE success
|
|
*/
|
|
|
|
bool
|
|
Security_context::
|
|
change_security_context(THD *thd,
|
|
LEX_STRING *definer_user,
|
|
LEX_STRING *definer_host,
|
|
LEX_STRING *db,
|
|
Security_context **backup)
|
|
{
|
|
bool needs_change;
|
|
|
|
DBUG_ENTER("Security_context::change_security_context");
|
|
|
|
DBUG_ASSERT(definer_user->str && definer_host->str);
|
|
|
|
*backup= NULL;
|
|
needs_change= (strcmp(definer_user->str, thd->security_ctx->priv_user) ||
|
|
my_strcasecmp(system_charset_info, definer_host->str,
|
|
thd->security_ctx->priv_host));
|
|
if (needs_change)
|
|
{
|
|
if (acl_getroot(this, definer_user->str, definer_host->str,
|
|
definer_host->str, db->str))
|
|
{
|
|
my_error(ER_NO_SUCH_USER, MYF(0), definer_user->str,
|
|
definer_host->str);
|
|
DBUG_RETURN(TRUE);
|
|
}
|
|
*backup= thd->security_ctx;
|
|
thd->security_ctx= this;
|
|
}
|
|
|
|
DBUG_RETURN(FALSE);
|
|
}
|
|
|
|
|
|
void
|
|
Security_context::restore_security_context(THD *thd,
|
|
Security_context *backup)
|
|
{
|
|
if (backup)
|
|
thd->security_ctx= backup;
|
|
}
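/*
  Usage sketch for the pair above (the variable names are illustrative, not
  identifiers from this file). This is the pattern followed when a stored
  routine or view defined with SQL SECURITY DEFINER has to run under the
  credentials of its definer:

    Security_context definer_ctx;
    Security_context *saved_ctx= NULL;
    if (definer_ctx.change_security_context(thd, &definer_user,
                                            &definer_host, &db, &saved_ctx))
      return TRUE;   // no such user; the error has already been reported
    ... execute the definer's SQL; thd->security_ctx now holds the right
        credentials ...
    definer_ctx.restore_security_context(thd, saved_ctx);

  restore_security_context() is a no-op when no switch was needed
  (saved_ctx stays NULL), so it is safe to call unconditionally.
*/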
|
|
#endif
|
|
|
|
|
|
bool Security_context::user_matches(Security_context *them)
|
|
{
|
|
return ((user != NULL) && (them->user != NULL) &&
|
|
!strcmp(user, them->user));
|
|
}
|
|
|
|
|
|
/****************************************************************************
|
|
Handling of open and locked tables states.
|
|
|
|
This is used when we want to open/lock (and then close) some tables when
|
|
we already have a set of tables open and locked. We use these methods for
|
|
access to mysql.proc table to find definitions of stored routines.
|
|
****************************************************************************/
|
|
|
|
void THD::reset_n_backup_open_tables_state(Open_tables_backup *backup)
|
|
{
|
|
DBUG_ENTER("reset_n_backup_open_tables_state");
|
|
backup->set_open_tables_state(this);
|
|
backup->mdl_system_tables_svp= mdl_context.mdl_savepoint();
|
|
reset_open_tables_state(this);
|
|
state_flags|= Open_tables_state::BACKUPS_AVAIL;
|
|
DBUG_VOID_RETURN;
|
|
}
|
|
|
|
|
|
void THD::restore_backup_open_tables_state(Open_tables_backup *backup)
|
|
{
|
|
DBUG_ENTER("restore_backup_open_tables_state");
|
|
mdl_context.rollback_to_savepoint(backup->mdl_system_tables_svp);
|
|
/*
|
|
Before we will throw away current open tables state we want
|
|
to be sure that it was properly cleaned up.
|
|
*/
|
|
DBUG_ASSERT(open_tables == 0 && temporary_tables == 0 &&
|
|
derived_tables == 0 &&
|
|
lock == 0 &&
|
|
locked_tables_mode == LTM_NONE &&
|
|
m_reprepare_observer == NULL);
|
|
|
|
set_open_tables_state(backup);
|
|
DBUG_VOID_RETURN;
|
|
}
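/*
  Usage sketch for the two methods above (the open/read step is a
  placeholder, not code from this file): callers that need a system table
  such as mysql.proc while other tables are already open and locked do

    Open_tables_backup open_tables_state_backup;
    thd->reset_n_backup_open_tables_state(&open_tables_state_backup);
    ... open mysql.proc, read the routine definition, close it again ...
    thd->restore_backup_open_tables_state(&open_tables_state_backup);

  The restore call asserts that everything opened in between has been
  closed and unlocked, so the caller must clean up before restoring.
*/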
|
|
|
|
#if MARIA_PLUGIN_INTERFACE_VERSION < 0x0200
|
|
/**
|
|
This is a backward compatibility method, made obsolete
|
|
by the thd_kill_statement service. Keep it here to avoid breaking the
|
|
ABI in case some binary plugins still use it.
|
|
*/
|
|
#undef thd_killed
|
|
extern "C" int thd_killed(const MYSQL_THD thd)
|
|
{
|
|
return thd_kill_level(thd) > THD_ABORT_SOFTLY;
|
|
}
|
|
#else
|
|
#error now thd_killed() function can go away
|
|
#endif
|
|
|
|
/*
|
|
Return the thd->killed status to the caller,
|
|
mapped to the API enum thd_kill_levels values.
|
|
*/
|
|
extern "C" enum thd_kill_levels thd_kill_level(const MYSQL_THD thd)
|
|
{
|
|
THD* current= current_thd;
|
|
|
|
if (!thd)
|
|
thd= current;
|
|
|
|
if (thd == current)
|
|
{
|
|
Apc_target *apc_target= (Apc_target*)&thd->apc_target;
|
|
if (apc_target->have_apc_requests())
|
|
apc_target->process_apc_requests();
|
|
}
|
|
|
|
if (likely(thd->killed == NOT_KILLED))
|
|
return THD_IS_NOT_KILLED;
|
|
|
|
return thd->killed & KILL_HARD_BIT ? THD_ABORT_ASAP : THD_ABORT_SOFTLY;
|
|
}
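/*
  Sketch of the intended calling pattern (the row-processing helpers are
  hypothetical, not functions from this file): a long-running engine or
  plugin operation polls the kill level and aborts cooperatively.

    while (engine_fetch_next_row(cursor))            // hypothetical helper
    {
      if (thd_kill_level(thd) != THD_IS_NOT_KILLED)
        break;                                       // stop at a safe point
      process_row(cursor);                           // hypothetical helper
    }

  Passing thd == NULL makes thd_kill_level() fall back to current_thd,
  which is convenient for code paths that do not have the THD at hand.
*/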
|
|
|
|
|
|
/**
|
|
Send an out-of-band progress report to the client
|
|
|
|
The report is sent every 'thd->...progress_report_time' seconds,
|
|
however not more often than global.progress_report_time.
|
|
If global.progress_report_time is 0, then don't send progress reports, but
|
|
check every second if the value has changed
|
|
*/
|
|
|
|
static void thd_send_progress(THD *thd)
|
|
{
|
|
/* Check if we should send the client a progress report */
|
|
ulonglong report_time= my_interval_timer();
|
|
if (report_time > thd->progress.next_report_time)
|
|
{
|
|
uint seconds_to_next= MY_MAX(thd->variables.progress_report_time,
|
|
global_system_variables.progress_report_time);
|
|
if (seconds_to_next == 0) // Turned off
|
|
seconds_to_next= 1; // Check again after 1 second
|
|
|
|
thd->progress.next_report_time= (report_time +
|
|
seconds_to_next * 1000000000ULL);
|
|
if (global_system_variables.progress_report_time &&
|
|
thd->variables.progress_report_time)
|
|
net_send_progress_packet(thd);
|
|
}
|
|
}
|
|
|
|
|
|
/** Initialize progress report handling **/
|
|
|
|
extern "C" void thd_progress_init(MYSQL_THD thd, uint max_stage)
|
|
{
|
|
DBUG_ASSERT(thd->stmt_arena != thd->progress.arena);
|
|
if (thd->progress.arena)
|
|
return; // already initialized
|
|
/*
|
|
Send progress reports to clients that support it, if the command
|
|
is a high level command (like ALTER TABLE) and we are not in a
|
|
stored procedure
|
|
*/
|
|
thd->progress.report= ((thd->client_capabilities & CLIENT_PROGRESS) &&
|
|
thd->progress.report_to_client &&
|
|
!thd->in_sub_stmt);
|
|
thd->progress.next_report_time= 0;
|
|
thd->progress.stage= 0;
|
|
thd->progress.counter= thd->progress.max_counter= 0;
|
|
thd->progress.max_stage= max_stage;
|
|
thd->progress.arena= thd->stmt_arena;
|
|
}
|
|
|
|
|
|
/* Inform processlist and the client that some progress has been made */
|
|
|
|
extern "C" void thd_progress_report(MYSQL_THD thd,
|
|
ulonglong progress, ulonglong max_progress)
|
|
{
|
|
if (thd->stmt_arena != thd->progress.arena)
|
|
return;
|
|
if (thd->progress.max_counter != max_progress) // Simple optimization
|
|
{
|
|
mysql_mutex_lock(&thd->LOCK_thd_data);
|
|
thd->progress.counter= progress;
|
|
thd->progress.max_counter= max_progress;
|
|
mysql_mutex_unlock(&thd->LOCK_thd_data);
|
|
}
|
|
else
|
|
thd->progress.counter= progress;
|
|
|
|
if (thd->progress.report)
|
|
thd_send_progress(thd);
|
|
}
|
|
|
|
/**
|
|
Move to next stage in process list handling
|
|
|
|
This will reset the timer to ensure the progress is sent to the client
|
|
if client progress reports are activated.
|
|
*/
|
|
|
|
extern "C" void thd_progress_next_stage(MYSQL_THD thd)
|
|
{
|
|
if (thd->stmt_arena != thd->progress.arena)
|
|
return;
|
|
mysql_mutex_lock(&thd->LOCK_thd_data);
|
|
thd->progress.stage++;
|
|
thd->progress.counter= 0;
|
|
DBUG_ASSERT(thd->progress.stage < thd->progress.max_stage);
|
|
mysql_mutex_unlock(&thd->LOCK_thd_data);
|
|
if (thd->progress.report)
|
|
{
|
|
thd->progress.next_report_time= 0; // Send new stage info
|
|
thd_send_progress(thd);
|
|
}
|
|
}
|
|
|
|
/**
|
|
Disable reporting of progress in process list.
|
|
|
|
@note
|
|
This function is safe to call even if one has not called thd_progress_init.
|
|
|
|
This function should be called by all parts that do progress
|
|
reporting to ensure that the processlist doesn't keep showing 100% done
|
|
forever.
|
|
*/
|
|
|
|
|
|
extern "C" void thd_progress_end(MYSQL_THD thd)
|
|
{
|
|
if (thd->stmt_arena != thd->progress.arena)
|
|
return;
|
|
/*
|
|
It's enough to reset max_counter to disable the progress indicator
|
|
in the processlist.
|
|
*/
|
|
thd->progress.max_counter= 0;
|
|
thd->progress.arena= 0;
|
|
}
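/*
  Sketch of the full progress API lifecycle as a storage engine or an
  ALTER TABLE-like operation would drive it (the two-stage operation and
  do_one_unit_of_work() are hypothetical):

    thd_progress_init(thd, 2);                    // declare two stages
    for (ulonglong i= 0; i < n_rows; i++)         // stage 1: e.g. copy rows
    {
      do_one_unit_of_work();
      thd_progress_report(thd, i + 1, n_rows);
    }
    thd_progress_next_stage(thd);                 // stage 2: e.g. build index
    ...
    thd_progress_end(thd);                        // always reached, see above

  thd_progress_end() must also be reached on error paths; otherwise the
  processlist keeps showing the last reported percentage.
*/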
|
|
|
|
|
|
/**
|
|
Return the thread id of a user thread
|
|
@param thd user thread
|
|
@return thread id
|
|
*/
|
|
extern "C" unsigned long thd_get_thread_id(const MYSQL_THD thd)
|
|
{
|
|
return((unsigned long)thd->thread_id);
|
|
}
|
|
|
|
/**
|
|
Check if THD socket is still connected.
|
|
*/
|
|
extern "C" int thd_is_connected(MYSQL_THD thd)
|
|
{
|
|
return thd->is_connected();
|
|
}
|
|
|
|
|
|
#ifdef INNODB_COMPATIBILITY_HOOKS
|
|
extern "C" const struct charset_info_st *thd_charset(MYSQL_THD thd)
|
|
{
|
|
return(thd->charset());
|
|
}
|
|
|
|
/**
|
|
OBSOLETE: there is no way to ensure the string is null-terminated.
|
|
Use thd_query_string() instead.
|
|
*/
|
|
extern "C" char **thd_query(MYSQL_THD thd)
|
|
{
|
|
return (&thd->query_string.string.str);
|
|
}
|
|
|
|
/**
|
|
Get the current query string for the thread.
|
|
|
|
@param thd The MySQL internal thread pointer
|
|
@return query string and length. May be non-null-terminated.
|
|
*/
|
|
extern "C" LEX_STRING * thd_query_string (MYSQL_THD thd)
|
|
{
|
|
return(&thd->query_string.string);
|
|
}
|
|
|
|
extern "C" int thd_slave_thread(const MYSQL_THD thd)
|
|
{
|
|
return(thd->slave_thread);
|
|
}
|
|
|
|
/* Returns true for a worker thread in parallel replication. */
|
|
extern "C" int thd_rpl_is_parallel(const MYSQL_THD thd)
|
|
{
|
|
return thd->rgi_slave && thd->rgi_slave->is_parallel_exec;
|
|
}
|
|
|
|
extern "C" int thd_non_transactional_update(const MYSQL_THD thd)
|
|
{
|
|
return(thd->transaction.all.modified_non_trans_table);
|
|
}
|
|
|
|
extern "C" int thd_binlog_format(const MYSQL_THD thd)
|
|
{
|
|
#ifdef WITH_WSREP
|
|
if (((WSREP(thd) && wsrep_emulate_bin_log) || mysql_bin_log.is_open()) &&
|
|
(thd->variables.option_bits & OPTION_BIN_LOG))
|
|
#else
|
|
if (mysql_bin_log.is_open() && (thd->variables.option_bits & OPTION_BIN_LOG))
|
|
#endif
|
|
return (int) WSREP_FORMAT(thd->variables.binlog_format);
|
|
else
|
|
return BINLOG_FORMAT_UNSPEC;
|
|
}
|
|
|
|
extern "C" void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all)
|
|
{
|
|
mark_transaction_to_rollback(thd, all);
|
|
}
|
|
|
|
extern "C" bool thd_binlog_filter_ok(const MYSQL_THD thd)
|
|
{
|
|
return binlog_filter->db_ok(thd->db);
|
|
}
|
|
|
|
extern "C" bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd)
|
|
{
|
|
return sqlcom_can_generate_row_events(thd);
|
|
}
|
|
|
|
|
|
extern "C" enum durability_properties thd_get_durability_property(const MYSQL_THD thd)
|
|
{
|
|
enum durability_properties ret= HA_REGULAR_DURABILITY;
|
|
|
|
if (thd != NULL)
|
|
ret= thd->durability_property;
|
|
|
|
return ret;
|
|
}
|
|
|
|
/** Get the auto_increment_offset and auto_increment_increment.
|
|
Exposed by thd_autoinc_service.
|
|
Needed by InnoDB.
|
|
@param thd Thread object
|
|
@param off auto_increment_offset
|
|
@param inc auto_increment_increment */
|
|
extern "C" void thd_get_autoinc(const MYSQL_THD thd, ulong* off, ulong* inc)
|
|
{
|
|
*off = thd->variables.auto_increment_offset;
|
|
*inc = thd->variables.auto_increment_increment;
|
|
}
|
|
|
|
|
|
/**
|
|
Is strict sql_mode set.
|
|
Needed by InnoDB.
|
|
@param thd Thread object
|
|
@return True if sql_mode has strict mode (all or trans).
|
|
@retval true sql_mode has strict mode (all or trans).
|
|
@retval false sql_mode does not have strict mode (all or trans).
|
|
*/
|
|
extern "C" bool thd_is_strict_mode(const MYSQL_THD thd)
|
|
{
|
|
return thd->is_strict_mode();
|
|
}
|
|
|
|
|
|
/*
|
|
Interface for MySQL Server, plugins and storage engines to report
|
|
when they are going to sleep/stall.
|
|
|
|
SYNOPSIS
|
|
thd_wait_begin()
|
|
thd Thread object
|
|
Can be NULL, in this case current THD is used.
|
|
wait_type Type of wait
|
|
1 -- short wait (e.g. for mutex)
|
|
2 -- medium wait (e.g. for disk io)
|
|
3 -- large wait (e.g. for locked row/table)
|
|
NOTES
|
|
This is used by the threadpool to have better knowledge of which
|
|
threads that currently are actively running on CPUs. When a thread
|
|
reports that it's going to sleep/stall, the threadpool scheduler is
|
|
then free to start another thread in the pool. The expected wait
|
|
time is simply an indication of how long the wait is expected to
|
|
become, the real wait time could be very different.
|
|
|
|
thd_wait_end MUST be called immediately after waking up again.
|
|
*/
|
|
extern "C" void thd_wait_begin(MYSQL_THD thd, int wait_type)
|
|
{
|
|
if (!thd)
|
|
{
|
|
thd= current_thd;
|
|
if (unlikely(!thd))
|
|
return;
|
|
}
|
|
MYSQL_CALLBACK(thd->scheduler, thd_wait_begin, (thd, wait_type));
|
|
}
|
|
|
|
/**
|
|
Interface for MySQL Server, plugins and storage engines to report
|
|
when they wake up from a sleep/stall.
|
|
|
|
@param thd Thread handle
|
|
Can be NULL, in this case current THD is used.
|
|
*/
|
|
extern "C" void thd_wait_end(MYSQL_THD thd)
|
|
{
|
|
if (!thd)
|
|
{
|
|
thd= current_thd;
|
|
if (unlikely(!thd))
|
|
return;
|
|
}
|
|
MYSQL_CALLBACK(thd->scheduler, thd_wait_end, (thd));
|
|
}
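/*
  Pairing sketch (the blocking call is a hypothetical placeholder): a
  storage engine brackets a potentially blocking operation so that the
  thread pool can schedule other work while this thread is stalled.

    thd_wait_begin(thd, 2);              // 2 == medium wait, e.g. disk I/O
    read_block_from_disk(block);         // hypothetical blocking call
    thd_wait_end(thd);                   // must follow immediately on wakeup

  Both calls accept thd == NULL and then fall back to current_thd.
*/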
|
|
|
|
#endif // INNODB_COMPATIBILITY_HOOKS
|
|
|
|
/****************************************************************************
|
|
Handling of statement states in functions and triggers.
|
|
|
|
This is used to ensure that the function/trigger gets a clean state
|
|
to work with and does not cause any side effects on the calling statement.
|
|
|
|
It also allows most stored functions and triggers to replicate even
|
|
if they use items that would normally be stored in the binary
|
|
log (like last_insert_id() etc...)
|
|
|
|
The following things are done
|
|
- Disable binary logging for the duration of the statement
|
|
- Disable multi-result-sets for the duration of the statement
|
|
- Value of last_insert_id() is saved and restored
|
|
- Value set by 'SET INSERT_ID=#' is reset and restored
|
|
- Value for found_rows() is reset and restored
|
|
- examined_row_count is added to the total
|
|
- cuted_fields is added to the total
|
|
- new savepoint level is created and destroyed
|
|
|
|
NOTES:
|
|
Seed for random() is saved for the first! usage of RAND()
|
|
We reset examined_row_count and cuted_fields and add these to the
|
|
result to ensure that if we have a bug that would reset these within
|
|
a function, we are not losing any rows from the main statement.
|
|
|
|
We do not reset value of last_insert_id().
|
|
****************************************************************************/
|
|
|
|
void THD::reset_sub_statement_state(Sub_statement_state *backup,
|
|
uint new_state)
|
|
{
|
|
#ifndef EMBEDDED_LIBRARY
|
|
/* BUG#33029, if we are replicating from a buggy master, reset
|
|
auto_inc_intervals_forced to prevent substatement
|
|
(triggers/functions) from using erroneous INSERT_ID value
|
|
*/
|
|
if (rpl_master_erroneous_autoinc(this))
|
|
{
|
|
DBUG_ASSERT(backup->auto_inc_intervals_forced.nb_elements() == 0);
|
|
auto_inc_intervals_forced.swap(&backup->auto_inc_intervals_forced);
|
|
}
|
|
#endif
|
|
|
|
backup->option_bits= variables.option_bits;
|
|
backup->count_cuted_fields= count_cuted_fields;
|
|
backup->in_sub_stmt= in_sub_stmt;
|
|
backup->enable_slow_log= enable_slow_log;
|
|
backup->query_plan_flags= query_plan_flags;
|
|
backup->limit_found_rows= limit_found_rows;
|
|
backup->examined_row_count= m_examined_row_count;
|
|
backup->sent_row_count= m_sent_row_count;
|
|
backup->cuted_fields= cuted_fields;
|
|
backup->client_capabilities= client_capabilities;
|
|
backup->savepoints= transaction.savepoints;
|
|
backup->first_successful_insert_id_in_prev_stmt=
|
|
first_successful_insert_id_in_prev_stmt;
|
|
backup->first_successful_insert_id_in_cur_stmt=
|
|
first_successful_insert_id_in_cur_stmt;
|
|
|
|
if ((!lex->requires_prelocking() || is_update_query(lex->sql_command)) &&
|
|
!is_current_stmt_binlog_format_row())
|
|
{
|
|
variables.option_bits&= ~OPTION_BIN_LOG;
|
|
}
|
|
|
|
if ((backup->option_bits & OPTION_BIN_LOG) &&
|
|
is_update_query(lex->sql_command) &&
|
|
!is_current_stmt_binlog_format_row())
|
|
mysql_bin_log.start_union_events(this, this->query_id);
|
|
|
|
/* Disable result sets */
|
|
client_capabilities &= ~CLIENT_MULTI_RESULTS;
|
|
in_sub_stmt|= new_state;
|
|
m_examined_row_count= 0;
|
|
m_sent_row_count= 0;
|
|
cuted_fields= 0;
|
|
transaction.savepoints= 0;
|
|
first_successful_insert_id_in_cur_stmt= 0;
|
|
}
|
|
|
|
|
|
void THD::restore_sub_statement_state(Sub_statement_state *backup)
|
|
{
|
|
DBUG_ENTER("THD::restore_sub_statement_state");
|
|
#ifndef EMBEDDED_LIBRARY
|
|
/* BUG#33029, if we are replicating from a buggy master, restore
|
|
auto_inc_intervals_forced so that the top statement can use the
|
|
INSERT_ID value set before this statement.
|
|
*/
|
|
if (rpl_master_erroneous_autoinc(this))
|
|
{
|
|
backup->auto_inc_intervals_forced.swap(&auto_inc_intervals_forced);
|
|
DBUG_ASSERT(backup->auto_inc_intervals_forced.nb_elements() == 0);
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
To save resources we want to release savepoints which were created
|
|
during execution of function or trigger before leaving their savepoint
|
|
level. It is enough to release first savepoint set on this level since
|
|
all later savepoints will be released automatically.
|
|
*/
|
|
if (transaction.savepoints)
|
|
{
|
|
SAVEPOINT *sv;
|
|
for (sv= transaction.savepoints; sv->prev; sv= sv->prev)
|
|
{}
|
|
/* ha_release_savepoint() never returns error. */
|
|
(void)ha_release_savepoint(this, sv);
|
|
}
|
|
count_cuted_fields= backup->count_cuted_fields;
|
|
transaction.savepoints= backup->savepoints;
|
|
variables.option_bits= backup->option_bits;
|
|
in_sub_stmt= backup->in_sub_stmt;
|
|
enable_slow_log= backup->enable_slow_log;
|
|
query_plan_flags= backup->query_plan_flags;
|
|
first_successful_insert_id_in_prev_stmt=
|
|
backup->first_successful_insert_id_in_prev_stmt;
|
|
first_successful_insert_id_in_cur_stmt=
|
|
backup->first_successful_insert_id_in_cur_stmt;
|
|
limit_found_rows= backup->limit_found_rows;
|
|
set_sent_row_count(backup->sent_row_count);
|
|
client_capabilities= backup->client_capabilities;
|
|
/*
|
|
If we've left sub-statement mode, reset the fatal error flag.
|
|
Otherwise keep the current value, to propagate it up the sub-statement
|
|
stack.
|
|
*/
|
|
if (!in_sub_stmt)
|
|
is_fatal_sub_stmt_error= FALSE;
|
|
|
|
if ((variables.option_bits & OPTION_BIN_LOG) && is_update_query(lex->sql_command) &&
|
|
!is_current_stmt_binlog_format_row())
|
|
mysql_bin_log.stop_union_events(this);
|
|
|
|
/*
|
|
The following is added to the old values as we are interested in the
|
|
total complexity of the query
|
|
*/
|
|
inc_examined_row_count(backup->examined_row_count);
|
|
cuted_fields+= backup->cuted_fields;
|
|
DBUG_VOID_RETURN;
|
|
}
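/*
  Calling pattern sketch for the two methods above (execute_function_body()
  is a hypothetical stand-in for running a stored function or trigger):

    Sub_statement_state backup;
    thd->reset_sub_statement_state(&backup, SUB_STMT_FUNCTION);
    bool err= execute_function_body(thd);
    thd->restore_sub_statement_state(&backup);

  Everything listed in the comment above (binary logging, last_insert_id(),
  found_rows(), the savepoint level, ...) is saved by the first call and
  put back by the second, while examined_row_count and cuted_fields of the
  sub-statement are added to the totals of the main statement.
*/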
|
|
|
|
|
|
void THD::set_statement(Statement *stmt)
|
|
{
|
|
mysql_mutex_lock(&LOCK_thd_data);
|
|
Statement::set_statement(stmt);
|
|
mysql_mutex_unlock(&LOCK_thd_data);
|
|
}
|
|
|
|
void THD::set_sent_row_count(ha_rows count)
|
|
{
|
|
m_sent_row_count= count;
|
|
MYSQL_SET_STATEMENT_ROWS_SENT(m_statement_psi, m_sent_row_count);
|
|
}
|
|
|
|
void THD::set_examined_row_count(ha_rows count)
|
|
{
|
|
m_examined_row_count= count;
|
|
MYSQL_SET_STATEMENT_ROWS_EXAMINED(m_statement_psi, m_examined_row_count);
|
|
}
|
|
|
|
void THD::inc_sent_row_count(ha_rows count)
|
|
{
|
|
m_sent_row_count+= count;
|
|
MYSQL_SET_STATEMENT_ROWS_SENT(m_statement_psi, m_sent_row_count);
|
|
}
|
|
|
|
void THD::inc_examined_row_count(ha_rows count)
|
|
{
|
|
m_examined_row_count+= count;
|
|
MYSQL_SET_STATEMENT_ROWS_EXAMINED(m_statement_psi, m_examined_row_count);
|
|
}
|
|
|
|
void THD::inc_status_created_tmp_disk_tables()
|
|
{
|
|
status_var_increment(status_var.created_tmp_disk_tables_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_created_tmp_disk_tables)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_created_tmp_tables()
|
|
{
|
|
status_var_increment(status_var.created_tmp_tables_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_created_tmp_tables)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_select_full_join()
|
|
{
|
|
status_var_increment(status_var.select_full_join_count_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_select_full_join)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_select_full_range_join()
|
|
{
|
|
status_var_increment(status_var.select_full_range_join_count_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_select_full_range_join)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_select_range()
|
|
{
|
|
status_var_increment(status_var.select_range_count_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_select_range)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_select_range_check()
|
|
{
|
|
status_var_increment(status_var.select_range_check_count_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_select_range_check)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_select_scan()
|
|
{
|
|
status_var_increment(status_var.select_scan_count_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_select_scan)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_sort_merge_passes()
|
|
{
|
|
status_var_increment(status_var.filesort_merge_passes_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_sort_merge_passes)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_sort_range()
|
|
{
|
|
status_var_increment(status_var.filesort_range_count_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_sort_range)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_sort_rows(ha_rows count)
|
|
{
|
|
statistic_add(status_var.filesort_rows_, count, &LOCK_status);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_sort_rows)(m_statement_psi, count);
|
|
#endif
|
|
}
|
|
|
|
void THD::inc_status_sort_scan()
|
|
{
|
|
status_var_increment(status_var.filesort_scan_count_);
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(inc_statement_sort_scan)(m_statement_psi, 1);
|
|
#endif
|
|
}
|
|
|
|
void THD::set_status_no_index_used()
|
|
{
|
|
server_status|= SERVER_QUERY_NO_INDEX_USED;
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(set_statement_no_index_used)(m_statement_psi);
|
|
#endif
|
|
}
|
|
|
|
void THD::set_status_no_good_index_used()
|
|
{
|
|
server_status|= SERVER_QUERY_NO_GOOD_INDEX_USED;
|
|
#ifdef HAVE_PSI_STATEMENT_INTERFACE
|
|
PSI_STATEMENT_CALL(set_statement_no_good_index_used)(m_statement_psi);
|
|
#endif
|
|
}
|
|
|
|
void THD::set_command(enum enum_server_command command)
|
|
{
|
|
m_command= command;
|
|
#ifdef HAVE_PSI_THREAD_INTERFACE
|
|
PSI_STATEMENT_CALL(set_thread_command)(m_command);
|
|
#endif
|
|
}
|
|
|
|
/** Assign a new value to thd->query. */
|
|
|
|
void THD::set_query(const CSET_STRING &string_arg)
|
|
{
|
|
mysql_mutex_lock(&LOCK_thd_data);
|
|
set_query_inner(string_arg);
|
|
mysql_mutex_unlock(&LOCK_thd_data);
|
|
|
|
#ifdef HAVE_PSI_THREAD_INTERFACE
|
|
PSI_THREAD_CALL(set_thread_info)(query(), query_length());
|
|
#endif
|
|
}
|
|
|
|
/** Assign a new value to thd->query and thd->query_id. */
|
|
|
|
void THD::set_query_and_id(char *query_arg, uint32 query_length_arg,
|
|
CHARSET_INFO *cs,
|
|
query_id_t new_query_id)
|
|
{
|
|
mysql_mutex_lock(&LOCK_thd_data);
|
|
set_query_inner(query_arg, query_length_arg, cs);
|
|
mysql_mutex_unlock(&LOCK_thd_data);
|
|
query_id= new_query_id;
|
|
}
|
|
|
|
/** Assign a new value to thd->mysys_var. */
|
|
void THD::set_mysys_var(struct st_my_thread_var *new_mysys_var)
|
|
{
|
|
mysql_mutex_lock(&LOCK_thd_data);
|
|
mysys_var= new_mysys_var;
|
|
mysql_mutex_unlock(&LOCK_thd_data);
|
|
}
|
|
|
|
/**
|
|
Leave explicit LOCK TABLES or prelocked mode and restore value of
|
|
transaction sentinel in MDL subsystem.
|
|
*/
|
|
|
|
void THD::leave_locked_tables_mode()
|
|
{
|
|
if (locked_tables_mode == LTM_LOCK_TABLES)
|
|
{
|
|
/*
|
|
When leaving LOCK TABLES mode we have to change the duration of most
|
|
of the metadata locks being held, except for HANDLER and GRL locks,
|
|
to transactional for them to be properly released at UNLOCK TABLES.
|
|
*/
|
|
mdl_context.set_transaction_duration_for_all_locks();
|
|
/*
|
|
Make sure we don't release the global read lock and commit blocker
|
|
when leaving LTM.
|
|
*/
|
|
global_read_lock.set_explicit_lock_duration(this);
|
|
/* Also ensure that we don't release metadata locks for open HANDLERs. */
|
|
if (handler_tables_hash.records)
|
|
mysql_ha_set_explicit_lock_duration(this);
|
|
if (ull_hash.records)
|
|
mysql_ull_set_explicit_lock_duration(this);
|
|
}
|
|
locked_tables_mode= LTM_NONE;
|
|
}
|
|
|
|
void THD::get_definer(LEX_USER *definer, bool role)
|
|
{
|
|
binlog_invoker(role);
|
|
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
|
|
if (slave_thread && has_invoker())
|
|
{
|
|
definer->user = invoker_user;
|
|
definer->host= invoker_host;
|
|
definer->password= null_lex_str;
|
|
definer->plugin= empty_lex_str;
|
|
definer->auth= empty_lex_str;
|
|
}
|
|
else
|
|
#endif
|
|
get_default_definer(this, definer, role);
|
|
}
|
|
|
|
|
|
/**
|
|
Mark transaction to rollback and mark error as fatal to a sub-statement.
|
|
|
|
@param thd Thread handle
|
|
@param all TRUE <=> rollback main transaction.
|
|
*/
|
|
|
|
void mark_transaction_to_rollback(THD *thd, bool all)
|
|
{
|
|
if (thd)
|
|
{
|
|
thd->is_fatal_sub_stmt_error= TRUE;
|
|
thd->transaction_rollback_request= all;
|
|
}
|
|
}
|
|
/***************************************************************************
|
|
Handling of XA id caching
|
|
***************************************************************************/
|
|
|
|
mysql_mutex_t LOCK_xid_cache;
|
|
HASH xid_cache;
|
|
|
|
extern "C" uchar *xid_get_hash_key(const uchar *, size_t *, my_bool);
|
|
extern "C" void xid_free_hash(void *);
|
|
|
|
uchar *xid_get_hash_key(const uchar *ptr, size_t *length,
|
|
my_bool not_used __attribute__((unused)))
|
|
{
|
|
*length=((XID_STATE*)ptr)->xid.key_length();
|
|
return ((XID_STATE*)ptr)->xid.key();
|
|
}
|
|
|
|
void xid_free_hash(void *ptr)
|
|
{
|
|
if (!((XID_STATE*)ptr)->in_thd)
|
|
my_free(ptr);
|
|
}
|
|
|
|
#ifdef HAVE_PSI_INTERFACE
|
|
static PSI_mutex_key key_LOCK_xid_cache;
|
|
|
|
static PSI_mutex_info all_xid_mutexes[]=
|
|
{
|
|
{ &key_LOCK_xid_cache, "LOCK_xid_cache", PSI_FLAG_GLOBAL}
|
|
};
|
|
|
|
static void init_xid_psi_keys(void)
|
|
{
|
|
const char* category= "sql";
|
|
int count;
|
|
|
|
if (PSI_server == NULL)
|
|
return;
|
|
|
|
count= array_elements(all_xid_mutexes);
|
|
PSI_server->register_mutex(category, all_xid_mutexes, count);
|
|
}
|
|
#endif /* HAVE_PSI_INTERFACE */
|
|
|
|
bool xid_cache_init()
|
|
{
|
|
#ifdef HAVE_PSI_INTERFACE
|
|
init_xid_psi_keys();
|
|
#endif
|
|
|
|
mysql_mutex_init(key_LOCK_xid_cache, &LOCK_xid_cache, MY_MUTEX_INIT_FAST);
|
|
return my_hash_init(&xid_cache, &my_charset_bin, 100, 0, 0,
|
|
xid_get_hash_key, xid_free_hash, 0) != 0;
|
|
}
|
|
|
|
void xid_cache_free()
|
|
{
|
|
if (my_hash_inited(&xid_cache))
|
|
{
|
|
my_hash_free(&xid_cache);
|
|
mysql_mutex_destroy(&LOCK_xid_cache);
|
|
}
|
|
}
|
|
|
|
XID_STATE *xid_cache_search(XID *xid)
|
|
{
|
|
mysql_mutex_lock(&LOCK_xid_cache);
|
|
XID_STATE *res=(XID_STATE *)my_hash_search(&xid_cache, xid->key(),
|
|
xid->key_length());
|
|
mysql_mutex_unlock(&LOCK_xid_cache);
|
|
return res;
|
|
}
|
|
|
|
|
|
bool xid_cache_insert(XID *xid, enum xa_states xa_state)
|
|
{
|
|
XID_STATE *xs;
|
|
my_bool res;
|
|
mysql_mutex_lock(&LOCK_xid_cache);
|
|
if (my_hash_search(&xid_cache, xid->key(), xid->key_length()))
|
|
res=0;
|
|
else if (!(xs=(XID_STATE *)my_malloc(sizeof(*xs), MYF(MY_WME))))
|
|
res=1;
|
|
else
|
|
{
|
|
xs->xa_state=xa_state;
|
|
xs->xid.set(xid);
|
|
xs->in_thd=0;
|
|
xs->rm_error=0;
|
|
res=my_hash_insert(&xid_cache, (uchar*)xs);
|
|
}
|
|
mysql_mutex_unlock(&LOCK_xid_cache);
|
|
return res;
|
|
}
|
|
|
|
|
|
bool xid_cache_insert(XID_STATE *xid_state)
|
|
{
|
|
mysql_mutex_lock(&LOCK_xid_cache);
|
|
if (my_hash_search(&xid_cache, xid_state->xid.key(),
|
|
xid_state->xid.key_length()))
|
|
{
|
|
mysql_mutex_unlock(&LOCK_xid_cache);
|
|
my_error(ER_XAER_DUPID, MYF(0));
|
|
return true;
|
|
}
|
|
bool res= my_hash_insert(&xid_cache, (uchar*)xid_state);
|
|
mysql_mutex_unlock(&LOCK_xid_cache);
|
|
return res;
|
|
}
|
|
|
|
|
|
void xid_cache_delete(XID_STATE *xid_state)
|
|
{
|
|
mysql_mutex_lock(&LOCK_xid_cache);
|
|
my_hash_delete(&xid_cache, (uchar *)xid_state);
|
|
mysql_mutex_unlock(&LOCK_xid_cache);
|
|
}
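/*
  Sketch of how the cache above is driven by the XA code (the statement
  mapping is illustrative, not a verbatim trace):

    XA START 'x'    -> xid_cache_insert(&thd->transaction.xid_state):
                       THD-owned entry, rejected with ER_XAER_DUPID if the
                       xid is already known
    XA END/PREPARE  -> the entry stays in the cache, xa_state is updated
    XA COMMIT or
    XA ROLLBACK     -> xid_cache_search() locates the entry (possibly from
                       another connection for a prepared xid), then
                       xid_cache_delete() removes it

  Entries created with xid_cache_insert(XID*, xa_states), e.g. for xids
  recovered from storage engines at startup, have in_thd == 0, so
  xid_free_hash() frees them when the hash is destroyed; THD-owned entries
  are left to their THD.
*/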
|
|
|
|
|
|
/**
|
|
Decide on logging format to use for the statement and issue errors
|
|
or warnings as needed. The decision depends on the following
|
|
parameters:
|
|
|
|
- The logging mode, i.e., the value of binlog_format. Can be
|
|
statement, mixed, or row.
|
|
|
|
- The type of statement. There are three types of statements:
|
|
"normal" safe statements; unsafe statements; and row injections.
|
|
An unsafe statement is one that, if logged in statement format,
|
|
might produce different results when replayed on the slave (e.g.,
|
|
INSERT DELAYED). A row injection is either a BINLOG statement, or
|
|
a row event executed by the slave's SQL thread.
|
|
|
|
- The capabilities of tables modified by the statement. The
|
|
*capabilities vector* for a table is a set of flags associated
|
|
with the table. Currently, it only includes two flags: *row
|
|
capability flag* and *statement capability flag*.
|
|
|
|
The row capability flag is set if and only if the engine can
|
|
handle row-based logging. The statement capability flag is set if
|
|
and only if the table can handle statement-based logging.
|
|
|
|
Decision table for logging format
|
|
---------------------------------
|
|
|
|
The following table summarizes how the format and generated
|
|
warning/error depends on the tables' capabilities, the statement
|
|
type, and the current binlog_format.
|
|
|
|
Row capable N NNNNNNNNN YYYYYYYYY YYYYYYYYY
|
|
Statement capable N YYYYYYYYY NNNNNNNNN YYYYYYYYY
|
|
|
|
Statement type * SSSUUUIII SSSUUUIII SSSUUUIII
|
|
|
|
binlog_format * SMRSMRSMR SMRSMRSMR SMRSMRSMR
|
|
|
|
Logged format - SS-S----- -RR-RR-RR SRRSRR-RR
|
|
Warning/Error 1 --2732444 5--5--6-- ---7--6--
|
|
|
|
Legend
|
|
------
|
|
|
|
Row capable: N - Some table not row-capable, Y - All tables row-capable
|
|
Stmt capable: N - Some table not stmt-capable, Y - All tables stmt-capable
|
|
Statement type: (S)afe, (U)nsafe, or Row (I)njection
|
|
binlog_format: (S)TATEMENT, (M)IXED, or (R)OW
|
|
Logged format: (S)tatement or (R)ow
|
|
Warning/Error: Warnings and error messages are as follows:
|
|
|
|
1. Error: Cannot execute statement: binlogging impossible since both
|
|
row-incapable engines and statement-incapable engines are
|
|
involved.
|
|
|
|
2. Error: Cannot execute statement: binlogging impossible since
|
|
BINLOG_FORMAT = ROW and at least one table uses a storage engine
|
|
limited to statement-logging.
|
|
|
|
3. Error: Cannot execute statement: binlogging of unsafe statement
|
|
is impossible when storage engine is limited to statement-logging
|
|
and BINLOG_FORMAT = MIXED.
|
|
|
|
4. Error: Cannot execute row injection: binlogging impossible since
|
|
at least one table uses a storage engine limited to
|
|
statement-logging.
|
|
|
|
5. Error: Cannot execute statement: binlogging impossible since
|
|
BINLOG_FORMAT = STATEMENT and at least one table uses a storage
|
|
engine limited to row-logging.
|
|
|
|
6. Error: Cannot execute row injection: binlogging impossible since
|
|
BINLOG_FORMAT = STATEMENT.
|
|
|
|
7. Warning: Unsafe statement binlogged in statement format since
|
|
BINLOG_FORMAT = STATEMENT.
|
|
|
|
In addition, we can produce the following error (not depending on
|
|
the variables of the decision diagram):
|
|
|
|
8. Error: Cannot execute statement: binlogging impossible since more
|
|
than one engine is involved and at least one engine is
|
|
self-logging.
|
|
|
|
For each error case above, the statement is prevented from being
|
|
logged, we report an error, and roll back the statement. For
|
|
warnings, we set the thd->binlog_flags variable: the warning will be
|
|
printed only if the statement is successfully logged.
|
|
|
|
@see THD::binlog_query
|
|
|
|
@param[in] thd Client thread
|
|
@param[in] tables Tables involved in the query
|
|
|
|
@retval 0 No error; statement can be logged.
|
|
@retval -1 One of the error conditions above applies (1, 2, 4, 5, or 6).
|
|
*/
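/*
  Worked example of reading the decision table above: assume all tables
  are both row- and statement-capable (rightmost column group), the
  statement is Unsafe and binlog_format = STATEMENT. The fourth position
  of that group then applies: the statement is logged in Statement format
  and warning 7 (ER_BINLOG_UNSAFE_STATEMENT) is scheduled, which
  corresponds to case 7 in the code below.
*/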
|
|
|
|
int THD::decide_logging_format(TABLE_LIST *tables)
|
|
{
|
|
DBUG_ENTER("THD::decide_logging_format");
|
|
DBUG_PRINT("info", ("Query: %s", query()));
|
|
DBUG_PRINT("info", ("variables.binlog_format: %lu",
|
|
variables.binlog_format));
|
|
DBUG_PRINT("info", ("lex->get_stmt_unsafe_flags(): 0x%x",
|
|
lex->get_stmt_unsafe_flags()));
|
|
|
|
reset_binlog_local_stmt_filter();
|
|
|
|
/*
|
|
We should not decide logging format if the binlog is closed or
|
|
binlogging is off, or if the statement is filtered out from the
|
|
binlog by filtering rules.
|
|
*/
|
|
if (mysql_bin_log.is_open() && (variables.option_bits & OPTION_BIN_LOG) &&
|
|
!(WSREP_FORMAT(variables.binlog_format) == BINLOG_FORMAT_STMT &&
|
|
!binlog_filter->db_ok(db)))
|
|
{
|
|
/*
|
|
Compute one bit field with the union of all the engine
|
|
capabilities, and one with the intersection of all the engine
|
|
capabilities.
|
|
*/
|
|
handler::Table_flags flags_write_some_set= 0;
|
|
handler::Table_flags flags_access_some_set= 0;
|
|
handler::Table_flags flags_write_all_set=
|
|
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE;
|
|
|
|
/*
|
|
If different types of engines are about to be updated.
|
|
For example: InnoDB and Falcon; InnoDB and MyISAM.
|
|
*/
|
|
my_bool multi_write_engine= FALSE;
|
|
/*
|
|
If different types of engines are about to be accessed
|
|
and any of them is about to be updated. For example:
|
|
InnoDB and Falcon; InnoDB and MyISAM.
|
|
*/
|
|
my_bool multi_access_engine= FALSE;
|
|
/*
|
|
Identifies if a table is changed.
|
|
*/
|
|
my_bool is_write= FALSE;
|
|
/*
|
|
A pointer to a previous table that was changed.
|
|
*/
|
|
TABLE* prev_write_table= NULL;
|
|
/*
|
|
A pointer to a previous table that was accessed.
|
|
*/
|
|
TABLE* prev_access_table= NULL;
|
|
/**
|
|
The number of tables used in the current statement,
|
|
that should be replicated.
|
|
*/
|
|
uint replicated_tables_count= 0;
|
|
/**
|
|
The number of tables written to in the current statement,
|
|
that should not be replicated.
|
|
A table should not be replicated when it is considered
|
|
'local' to a MySQL instance.
|
|
Currently, these tables are:
|
|
- mysql.slow_log
|
|
- mysql.general_log
|
|
- mysql.slave_relay_log_info
|
|
- mysql.slave_master_info
|
|
- mysql.slave_worker_info
|
|
- performance_schema.*
|
|
- TODO: information_schema.*
|
|
In practice, from this list, only performance_schema.* tables
|
|
are written to by user queries.
|
|
*/
|
|
uint non_replicated_tables_count= 0;
|
|
|
|
#ifndef DBUG_OFF
|
|
{
|
|
static const char *prelocked_mode_name[] = {
|
|
"NON_PRELOCKED",
|
|
"PRELOCKED",
|
|
"PRELOCKED_UNDER_LOCK_TABLES",
|
|
};
|
|
DBUG_PRINT("debug", ("prelocked_mode: %s",
|
|
prelocked_mode_name[locked_tables_mode]));
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
Get the capabilities vector for all involved storage engines and
|
|
mask out the flags for the binary log.
|
|
*/
|
|
for (TABLE_LIST *table= tables; table; table= table->next_global)
|
|
{
|
|
if (table->placeholder())
|
|
continue;
|
|
|
|
handler::Table_flags const flags= table->table->file->ha_table_flags();
|
|
|
|
DBUG_PRINT("info", ("table: %s; ha_table_flags: 0x%llx",
|
|
table->table_name, flags));
|
|
|
|
if (table->table->no_replicate)
|
|
{
|
|
/*
|
|
The statement uses a table that is not replicated.
|
|
The following properties about the table:
|
|
- persistent / transient
|
|
- transactional / non transactional
|
|
- temporary / permanent
|
|
- read or write
|
|
- multiple engines involved because of this table
|
|
are not relevant, as this table is completely ignored.
|
|
Because the statement uses a non replicated table,
|
|
using STATEMENT format in the binlog is impossible.
|
|
Either this statement will be discarded entirely,
|
|
or it will be logged (possibly partially) in ROW format.
|
|
*/
|
|
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_TABLE);
|
|
|
|
if (table->lock_type >= TL_WRITE_ALLOW_WRITE)
|
|
{
|
|
non_replicated_tables_count++;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
replicated_tables_count++;
|
|
|
|
if (table->lock_type >= TL_WRITE_ALLOW_WRITE)
|
|
{
|
|
if (prev_write_table && prev_write_table->file->ht !=
|
|
table->table->file->ht)
|
|
multi_write_engine= TRUE;
|
|
|
|
my_bool trans= table->table->file->has_transactions();
|
|
|
|
if (table->table->s->tmp_table)
|
|
lex->set_stmt_accessed_table(trans ? LEX::STMT_WRITES_TEMP_TRANS_TABLE :
|
|
LEX::STMT_WRITES_TEMP_NON_TRANS_TABLE);
|
|
else
|
|
lex->set_stmt_accessed_table(trans ? LEX::STMT_WRITES_TRANS_TABLE :
|
|
LEX::STMT_WRITES_NON_TRANS_TABLE);
|
|
|
|
flags_write_all_set &= flags;
|
|
flags_write_some_set |= flags;
|
|
is_write= TRUE;
|
|
|
|
prev_write_table= table->table;
|
|
|
|
}
|
|
flags_access_some_set |= flags;
|
|
|
|
if (lex->sql_command != SQLCOM_CREATE_TABLE ||
|
|
(lex->sql_command == SQLCOM_CREATE_TABLE &&
|
|
lex->create_info.tmp_table()))
|
|
{
|
|
my_bool trans= table->table->file->has_transactions();
|
|
|
|
if (table->table->s->tmp_table)
|
|
lex->set_stmt_accessed_table(trans ? LEX::STMT_READS_TEMP_TRANS_TABLE :
|
|
LEX::STMT_READS_TEMP_NON_TRANS_TABLE);
|
|
else
|
|
lex->set_stmt_accessed_table(trans ? LEX::STMT_READS_TRANS_TABLE :
|
|
LEX::STMT_READS_NON_TRANS_TABLE);
|
|
}
|
|
|
|
if (prev_access_table && prev_access_table->file->ht !=
|
|
table->table->file->ht)
|
|
multi_access_engine= TRUE;
|
|
|
|
prev_access_table= table->table;
|
|
}
|
|
|
|
DBUG_PRINT("info", ("flags_write_all_set: 0x%llx", flags_write_all_set));
|
|
DBUG_PRINT("info", ("flags_write_some_set: 0x%llx", flags_write_some_set));
|
|
DBUG_PRINT("info", ("flags_access_some_set: 0x%llx", flags_access_some_set));
|
|
DBUG_PRINT("info", ("multi_write_engine: %d", multi_write_engine));
|
|
DBUG_PRINT("info", ("multi_access_engine: %d", multi_access_engine));
|
|
|
|
int error= 0;
|
|
int unsafe_flags;
|
|
|
|
bool multi_stmt_trans= in_multi_stmt_transaction_mode();
|
|
bool trans_table= trans_has_updated_trans_table(this);
|
|
bool binlog_direct= variables.binlog_direct_non_trans_update;
|
|
|
|
if (lex->is_mixed_stmt_unsafe(multi_stmt_trans, binlog_direct,
|
|
trans_table, tx_isolation))
|
|
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_MIXED_STATEMENT);
|
|
else if (multi_stmt_trans && trans_table && !binlog_direct &&
|
|
lex->stmt_accessed_table(LEX::STMT_WRITES_NON_TRANS_TABLE))
|
|
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_NONTRANS_AFTER_TRANS);
|
|
|
|
/*
|
|
If more than one engine is involved in the statement and at
|
|
least one is doing its own logging (is *self-logging*), the
|
|
statement cannot be logged atomically, so we generate an error
|
|
rather than allowing the binlog to become corrupt.
|
|
*/
|
|
if (multi_write_engine &&
|
|
(flags_write_some_set & HA_HAS_OWN_BINLOGGING))
|
|
my_error((error= ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE),
|
|
MYF(0));
|
|
else if (multi_access_engine && flags_access_some_set & HA_HAS_OWN_BINLOGGING)
|
|
lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE);
|
|
|
|
/* both statement-only and row-only engines involved */
|
|
if ((flags_write_all_set & (HA_BINLOG_STMT_CAPABLE | HA_BINLOG_ROW_CAPABLE)) == 0)
|
|
{
|
|
/*
|
|
1. Error: Binary logging impossible since both row-incapable
|
|
engines and statement-incapable engines are involved
|
|
*/
|
|
my_error((error= ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE), MYF(0));
|
|
}
|
|
/* statement-only engines involved */
|
|
else if ((flags_write_all_set & HA_BINLOG_ROW_CAPABLE) == 0)
|
|
{
|
|
if (lex->is_stmt_row_injection())
|
|
{
|
|
/*
|
|
4. Error: Cannot execute row injection since table uses
|
|
storage engine limited to statement-logging
|
|
*/
|
|
my_error((error= ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE), MYF(0));
|
|
}
|
|
else if (WSREP_FORMAT(variables.binlog_format) == BINLOG_FORMAT_ROW &&
|
|
sqlcom_can_generate_row_events(this))
|
|
{
|
|
/*
|
|
2. Error: Cannot modify table that uses a storage engine
|
|
limited to statement-logging when BINLOG_FORMAT = ROW
|
|
*/
|
|
my_error((error= ER_BINLOG_ROW_MODE_AND_STMT_ENGINE), MYF(0));
|
|
}
|
|
else if ((unsafe_flags= lex->get_stmt_unsafe_flags()) != 0)
|
|
{
|
|
/*
|
|
3. Error: Cannot execute statement: binlogging of unsafe
|
|
statement is impossible when storage engine is limited to
|
|
statement-logging and BINLOG_FORMAT = MIXED.
|
|
*/
|
|
for (int unsafe_type= 0;
|
|
unsafe_type < LEX::BINLOG_STMT_UNSAFE_COUNT;
|
|
unsafe_type++)
|
|
if (unsafe_flags & (1 << unsafe_type))
|
|
my_error((error= ER_BINLOG_UNSAFE_AND_STMT_ENGINE), MYF(0),
|
|
ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type]));
|
|
}
|
|
/* log in statement format! */
|
|
}
|
|
/* no statement-only engines */
|
|
else
|
|
{
|
|
/* binlog_format = STATEMENT */
|
|
if (WSREP_FORMAT(variables.binlog_format) == BINLOG_FORMAT_STMT)
|
|
{
|
|
if (lex->is_stmt_row_injection())
|
|
{
|
|
/*
|
|
6. Error: Cannot execute row injection since
|
|
BINLOG_FORMAT = STATEMENT
|
|
*/
|
|
my_error((error= ER_BINLOG_ROW_INJECTION_AND_STMT_MODE), MYF(0));
|
|
}
|
|
else if ((flags_write_all_set & HA_BINLOG_STMT_CAPABLE) == 0 &&
|
|
sqlcom_can_generate_row_events(this))
|
|
{
|
|
/*
|
|
5. Error: Cannot modify table that uses a storage engine
|
|
limited to row-logging when binlog_format = STATEMENT
|
|
*/
|
|
#ifdef WITH_WSREP
|
|
if (!WSREP(this) || wsrep_exec_mode == LOCAL_STATE)
|
|
{
|
|
#endif /* WITH_WSREP */
|
|
my_error((error= ER_BINLOG_STMT_MODE_AND_ROW_ENGINE), MYF(0), "");
|
|
#ifdef WITH_WSREP
|
|
}
|
|
#endif /* WITH_WSREP */
|
|
}
|
|
else if (is_write && (unsafe_flags= lex->get_stmt_unsafe_flags()) != 0)
|
|
{
|
|
/*
|
|
7. Warning: Unsafe statement logged as statement due to
|
|
binlog_format = STATEMENT
|
|
*/
|
|
binlog_unsafe_warning_flags|= unsafe_flags;
|
|
|
|
DBUG_PRINT("info", ("Scheduling warning to be issued by "
|
|
"binlog_query: '%s'",
|
|
ER(ER_BINLOG_UNSAFE_STATEMENT)));
|
|
DBUG_PRINT("info", ("binlog_unsafe_warning_flags: 0x%x",
|
|
binlog_unsafe_warning_flags));
|
|
}
|
|
/* log in statement format! */
|
|
}
|
|
/* No statement-only engines and binlog_format != STATEMENT.
|
|
I.e., nothing prevents us from row logging if needed. */
|
|
else
|
|
{
|
|
if (lex->is_stmt_unsafe() || lex->is_stmt_row_injection()
|
|
|| (flags_write_all_set & HA_BINLOG_STMT_CAPABLE) == 0)
|
|
{
|
|
/* log in row format! */
|
|
set_current_stmt_binlog_format_row_if_mixed();
|
|
}
|
|
}
|
|
}
|
|
|
|
if (non_replicated_tables_count > 0)
|
|
{
|
|
if ((replicated_tables_count == 0) || ! is_write)
|
|
{
|
|
DBUG_PRINT("info", ("decision: no logging, no replicated table affected"));
|
|
set_binlog_local_stmt_filter();
|
|
}
|
|
else
|
|
{
|
|
if (! is_current_stmt_binlog_format_row())
|
|
{
|
|
my_error((error= ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES), MYF(0));
|
|
}
|
|
else
|
|
{
|
|
clear_binlog_local_stmt_filter();
|
|
}
|
|
}
|
|
}
|
|
else
|
|
{
|
|
clear_binlog_local_stmt_filter();
|
|
}
|
|
|
|
if (error) {
|
|
DBUG_PRINT("info", ("decision: no logging since an error was generated"));
|
|
DBUG_RETURN(-1);
|
|
}
|
|
DBUG_PRINT("info", ("decision: logging in %s format",
|
|
is_current_stmt_binlog_format_row() ?
|
|
"ROW" : "STATEMENT"));
|
|
|
|
if (variables.binlog_format == BINLOG_FORMAT_ROW &&
|
|
(lex->sql_command == SQLCOM_UPDATE ||
|
|
lex->sql_command == SQLCOM_UPDATE_MULTI ||
|
|
lex->sql_command == SQLCOM_DELETE ||
|
|
lex->sql_command == SQLCOM_DELETE_MULTI))
|
|
{
|
|
String table_names;
|
|
/*
|
|
Generate a warning for UPDATE/DELETE statements that modify a
|
|
BLACKHOLE table, as row events are not logged in row format.
|
|
*/
|
|
for (TABLE_LIST *table= tables; table; table= table->next_global)
|
|
{
|
|
if (table->placeholder())
|
|
continue;
|
|
if (table->table->file->ht->db_type == DB_TYPE_BLACKHOLE_DB &&
|
|
table->lock_type >= TL_WRITE_ALLOW_WRITE)
|
|
{
|
|
table_names.append(table->table_name);
|
|
table_names.append(",");
|
|
}
|
|
}
|
|
if (!table_names.is_empty())
|
|
{
|
|
bool is_update= (lex->sql_command == SQLCOM_UPDATE ||
|
|
lex->sql_command == SQLCOM_UPDATE_MULTI);
|
|
/*
|
|
Replace the last ',' with '.' for table_names
|
|
*/
|
|
table_names.replace(table_names.length()-1, 1, ".", 1);
|
|
push_warning_printf(this, Sql_condition::WARN_LEVEL_WARN,
|
|
ER_UNKNOWN_ERROR,
|
|
"Row events are not logged for %s statements "
|
|
"that modify BLACKHOLE tables in row format. "
|
|
"Table(s): '%-.192s'",
|
|
is_update ? "UPDATE" : "DELETE",
|
|
table_names.c_ptr());
|
|
}
|
|
}
|
|
}
|
|
#ifndef DBUG_OFF
|
|
else
|
|
DBUG_PRINT("info", ("decision: no logging since "
|
|
"mysql_bin_log.is_open() = %d "
|
|
"and (options & OPTION_BIN_LOG) = 0x%llx "
|
|
"and binlog_format = %lu "
|
|
"and binlog_filter->db_ok(db) = %d",
|
|
mysql_bin_log.is_open(),
|
|
(variables.option_bits & OPTION_BIN_LOG),
|
|
WSREP_FORMAT(variables.binlog_format),
|
|
binlog_filter->db_ok(db)));
|
|
#endif
|
|
|
|
DBUG_RETURN(0);
|
|
}
|
|
|
|
|
|
/*
|
|
Implementation of interface to write rows to the binary log through the
|
|
thread. The thread is responsible for writing the rows it has
|
|
inserted/updated/deleted.
|
|
*/
|
|
|
|
#ifndef MYSQL_CLIENT
|
|
|
|
/*
|
|
Template member function for ensuring that there is an rows log
|
|
event of the appropriate type before proceeding.
|
|
|
|
PRE CONDITION:
|
|
- Events of type 'RowEventT' have the type code 'type_code'.
|
|
|
|
POST CONDITION:
|
|
If a non-NULL pointer is returned, the pending event for thread 'thd' will
|
|
be an event of type 'RowEventT' (which has the type code 'type_code') and
|
|
will either be empty or have enough space to hold 'needed' bytes. In
|
|
addition, the columns bitmap will be correct for the row, meaning that
|
|
the pending event will be flushed if the columns in the event differ from
|
|
the columns supplied to the function.
|
|
|
|
RETURNS
|
|
If no error, a non-NULL pending event (either one which already existed or
|
|
the newly created one).
|
|
If error, NULL.
|
|
*/
|
|
|
|
template <class RowsEventT> Rows_log_event*
|
|
THD::binlog_prepare_pending_rows_event(TABLE* table, uint32 serv_id,
|
|
MY_BITMAP const* cols,
|
|
size_t colcnt,
|
|
size_t needed,
|
|
bool is_transactional,
|
|
RowsEventT *hint __attribute__((unused)))
|
|
{
|
|
DBUG_ENTER("binlog_prepare_pending_rows_event");
|
|
/* Pre-conditions */
|
|
DBUG_ASSERT(table->s->table_map_id != ~0UL);
|
|
|
|
/* Fetch the type code for the RowsEventT template parameter */
|
|
int const general_type_code= RowsEventT::TYPE_CODE;
|
|
|
|
/* Ensure that all events in a GTID group are in the same cache */
|
|
if (variables.option_bits & OPTION_GTID_BEGIN)
|
|
is_transactional= 1;
|
|
|
|
/*
|
|
There is no good place to set up the transactional data, so we
|
|
have to do it here.
|
|
*/
|
|
if (binlog_setup_trx_data() == NULL)
|
|
DBUG_RETURN(NULL);
|
|
|
|
Rows_log_event* pending= binlog_get_pending_rows_event(is_transactional);
|
|
|
|
if (unlikely(pending && !pending->is_valid()))
|
|
DBUG_RETURN(NULL);
|
|
|
|
/*
|
|
Check if the current event is non-NULL and a write-rows
|
|
event. Also check if the table provided is mapped: if it is not,
|
|
then we have switched to writing to a new table.
|
|
If there is no pending event, we need to create one. If there is a pending
|
|
event, but it's not about the same table id, or not of the same type
|
|
(between Write, Update and Delete), or not the same affected columns, or
|
|
going to be too big, flush this event to disk and create a new pending
|
|
event.
|
|
*/
|
|
if (!pending ||
|
|
pending->server_id != serv_id ||
|
|
pending->get_table_id() != table->s->table_map_id ||
|
|
pending->get_general_type_code() != general_type_code ||
|
|
pending->get_data_size() + needed > opt_binlog_rows_event_max_size ||
|
|
pending->get_width() != colcnt ||
|
|
!bitmap_cmp(pending->get_cols(), cols))
|
|
{
|
|
/* Create a new RowsEventT... */
|
|
Rows_log_event* const
|
|
ev= new RowsEventT(this, table, table->s->table_map_id, cols,
|
|
is_transactional);
|
|
if (unlikely(!ev))
|
|
DBUG_RETURN(NULL);
|
|
ev->server_id= serv_id; // I don't like this, it's too easy to forget.
|
|
/*
|
|
flush the pending event and replace it with the newly created
|
|
event...
|
|
*/
|
|
if (unlikely(
|
|
mysql_bin_log.flush_and_set_pending_rows_event(this, ev,
|
|
is_transactional)))
|
|
{
|
|
delete ev;
|
|
DBUG_RETURN(NULL);
|
|
}
|
|
|
|
DBUG_RETURN(ev); /* This is the new pending event */
|
|
}
|
|
DBUG_RETURN(pending); /* This is the current pending event */
|
|
}
|
|
|
|
/* Declare in unnamed namespace. */
|
|
CPP_UNNAMED_NS_START
|
|
/**
|
|
Class to handle temporary allocation of memory for row data.
|
|
|
|
The responsibility of the class is to provide memory for
|
|
packing one or two rows of packed data (depending on what
|
|
constructor is called).
|
|
|
|
In order to make the allocation more efficient for "simple" rows,
|
|
i.e., rows that do not contain any blobs, a pointer to the
|
|
allocated memory is stored in the table structure
|
|
for simple rows. If memory for a table containing a blob field
|
|
is requested, only memory for that is allocated, and subsequently
|
|
released when the object is destroyed.
|
|
|
|
*/
|
|
class Row_data_memory {
|
|
public:
|
|
/**
|
|
Build an object to keep track of a block-local piece of memory
|
|
for storing a row of data.
|
|
|
|
@param table
|
|
Table where the pre-allocated memory is stored.
|
|
|
|
@param length
|
|
Length of data that is needed, if the record contains blobs.
|
|
*/
|
|
Row_data_memory(TABLE *table, size_t const len1)
|
|
: m_memory(0)
|
|
{
|
|
#ifndef DBUG_OFF
|
|
m_alloc_checked= FALSE;
|
|
#endif
|
|
allocate_memory(table, len1);
|
|
m_ptr[0]= has_memory() ? m_memory : 0;
|
|
m_ptr[1]= 0;
|
|
}
|
|
|
|
Row_data_memory(TABLE *table, size_t const len1, size_t const len2)
|
|
: m_memory(0)
|
|
{
|
|
#ifndef DBUG_OFF
|
|
m_alloc_checked= FALSE;
|
|
#endif
|
|
allocate_memory(table, len1 + len2);
|
|
m_ptr[0]= has_memory() ? m_memory : 0;
|
|
m_ptr[1]= has_memory() ? m_memory + len1 : 0;
|
|
}
|
|
|
|
~Row_data_memory()
|
|
{
|
|
if (m_memory != 0 && m_release_memory_on_destruction)
|
|
my_free(m_memory);
|
|
}
|
|
|
|
/**
|
|
Is there memory allocated?
|
|
|
|
@retval true There is memory allocated
|
|
@retval false Memory allocation failed
|
|
*/
|
|
bool has_memory() const {
|
|
#ifndef DBUG_OFF
|
|
m_alloc_checked= TRUE;
|
|
#endif
|
|
return m_memory != 0;
|
|
}
|
|
|
|
uchar *slot(uint s)
|
|
{
|
|
DBUG_ASSERT(s < sizeof(m_ptr)/sizeof(*m_ptr));
|
|
DBUG_ASSERT(m_ptr[s] != 0);
|
|
DBUG_ASSERT(m_alloc_checked == TRUE);
|
|
return m_ptr[s];
|
|
}
|
|
|
|
private:
|
|
void allocate_memory(TABLE *const table, size_t const total_length)
|
|
{
|
|
if (table->s->blob_fields == 0)
|
|
{
|
|
/*
|
|
The maximum length of a packed record is less than this
|
|
length. We use this value instead of the supplied length
|
|
when allocating memory for records, since we don't know how
|
|
the memory will be used in future allocations.
|
|
|
|
Since table->s->reclength is for unpacked records, we have
|
|
to add two bytes for each field, which can potentially be
|
|
added to hold the length of a packed field.
|
|
*/
|
|
size_t const maxlen= table->s->reclength + 2 * table->s->fields;
|
|
|
|
/*
|
|
Allocate memory for two records if memory hasn't been
|
|
allocated. We allocate memory for two records so that it can
|
|
be used when processing update rows as well.
|
|
*/
|
|
if (table->write_row_record == 0)
|
|
table->write_row_record=
|
|
(uchar *) alloc_root(&table->mem_root, 2 * maxlen);
|
|
m_memory= table->write_row_record;
|
|
m_release_memory_on_destruction= FALSE;
|
|
}
|
|
else
|
|
{
|
|
m_memory= (uchar *) my_malloc(total_length, MYF(MY_WME));
|
|
m_release_memory_on_destruction= TRUE;
|
|
}
|
|
}
|
|
|
|
#ifndef DBUG_OFF
|
|
mutable bool m_alloc_checked;
|
|
#endif
|
|
bool m_release_memory_on_destruction;
|
|
uchar *m_memory;
|
|
uchar *m_ptr[2];
|
|
};

CPP_UNNAMED_NS_END

int THD::binlog_write_row(TABLE* table, bool is_trans,
                          MY_BITMAP const* cols, size_t colcnt,
                          uchar const *record)
{
#ifdef WITH_WSREP
  DBUG_ASSERT(is_current_stmt_binlog_format_row() &&
              ((WSREP(this) && wsrep_emulate_bin_log) ||
               mysql_bin_log.is_open()));
#else
  DBUG_ASSERT(is_current_stmt_binlog_format_row() && mysql_bin_log.is_open());
#endif

  /*
    Pack records into format for transfer. We are allocating more
    memory than needed, but that doesn't matter.
  */
  Row_data_memory memory(table, max_row_length(table, record));
  if (!memory.has_memory())
    return HA_ERR_OUT_OF_MEM;

  uchar *row_data= memory.slot(0);

  size_t const len= pack_row(table, cols, row_data, record);

  /* Ensure that all events in a GTID group are in the same cache */
  if (variables.option_bits & OPTION_GTID_BEGIN)
    is_trans= 1;

  Rows_log_event* const ev=
    binlog_prepare_pending_rows_event(table, variables.server_id, cols, colcnt,
                                      len, is_trans,
                                      static_cast<Write_rows_log_event*>(0));

  if (unlikely(ev == 0))
    return HA_ERR_OUT_OF_MEM;

  return ev->add_row_data(row_data, len);
}

int THD::binlog_update_row(TABLE* table, bool is_trans,
                           MY_BITMAP const* cols, size_t colcnt,
                           const uchar *before_record,
                           const uchar *after_record)
{
#ifdef WITH_WSREP
  DBUG_ASSERT(is_current_stmt_binlog_format_row() &&
              ((WSREP(this) && wsrep_emulate_bin_log)
               || mysql_bin_log.is_open()));
#else
  DBUG_ASSERT(is_current_stmt_binlog_format_row() && mysql_bin_log.is_open());
#endif

  size_t const before_maxlen = max_row_length(table, before_record);
  size_t const after_maxlen = max_row_length(table, after_record);

  Row_data_memory row_data(table, before_maxlen, after_maxlen);
  if (!row_data.has_memory())
    return HA_ERR_OUT_OF_MEM;

  uchar *before_row= row_data.slot(0);
  uchar *after_row= row_data.slot(1);

  size_t const before_size= pack_row(table, cols, before_row,
                                     before_record);
  size_t const after_size= pack_row(table, cols, after_row,
                                    after_record);

  /* Ensure that all events in a GTID group are in the same cache */
  if (variables.option_bits & OPTION_GTID_BEGIN)
    is_trans= 1;

  /*
    Don't print debug messages when running valgrind since they can
    trigger false warnings.
  */
#ifndef HAVE_valgrind
  DBUG_DUMP("before_record", before_record, table->s->reclength);
  DBUG_DUMP("after_record", after_record, table->s->reclength);
  DBUG_DUMP("before_row", before_row, before_size);
  DBUG_DUMP("after_row", after_row, after_size);
#endif

  Rows_log_event* const ev=
    binlog_prepare_pending_rows_event(table, variables.server_id, cols, colcnt,
                                      before_size + after_size, is_trans,
                                      static_cast<Update_rows_log_event*>(0));

  if (unlikely(ev == 0))
    return HA_ERR_OUT_OF_MEM;

  return
    ev->add_row_data(before_row, before_size) ||
    ev->add_row_data(after_row, after_size);
}

int THD::binlog_delete_row(TABLE* table, bool is_trans,
                           MY_BITMAP const* cols, size_t colcnt,
                           uchar const *record)
{
#ifdef WITH_WSREP
  DBUG_ASSERT(is_current_stmt_binlog_format_row() &&
              ((WSREP(this) && wsrep_emulate_bin_log)
               || mysql_bin_log.is_open()));
#else
  DBUG_ASSERT(is_current_stmt_binlog_format_row() && mysql_bin_log.is_open());
#endif

  /*
    Pack records into format for transfer. We are allocating more
    memory than needed, but that doesn't matter.
  */
  Row_data_memory memory(table, max_row_length(table, record));
  if (unlikely(!memory.has_memory()))
    return HA_ERR_OUT_OF_MEM;

  uchar *row_data= memory.slot(0);

  size_t const len= pack_row(table, cols, row_data, record);

  /* Ensure that all events in a GTID group are in the same cache */
  if (variables.option_bits & OPTION_GTID_BEGIN)
    is_trans= 1;

  Rows_log_event* const ev=
    binlog_prepare_pending_rows_event(table, variables.server_id, cols, colcnt,
                                      len, is_trans,
                                      static_cast<Delete_rows_log_event*>(0));

  if (unlikely(ev == 0))
    return HA_ERR_OUT_OF_MEM;

  return ev->add_row_data(row_data, len);
}


int THD::binlog_remove_pending_rows_event(bool clear_maps,
                                          bool is_transactional)
{
  DBUG_ENTER("THD::binlog_remove_pending_rows_event");

#ifdef WITH_WSREP
  if (!(WSREP_EMULATE_BINLOG(this) || mysql_bin_log.is_open()))
#else
  if (!mysql_bin_log.is_open())
#endif
    DBUG_RETURN(0);

  /* Ensure that all events in a GTID group are in the same cache */
  if (variables.option_bits & OPTION_GTID_BEGIN)
    is_transactional= 1;

  mysql_bin_log.remove_pending_rows_event(this, is_transactional);

  if (clear_maps)
    binlog_table_maps= 0;

  DBUG_RETURN(0);
}

int THD::binlog_flush_pending_rows_event(bool stmt_end, bool is_transactional)
{
  DBUG_ENTER("THD::binlog_flush_pending_rows_event");
  /*
    We shall flush the pending event even if we are not in row-based
    mode: it might be the case that we left row-based mode before
    flushing anything (e.g., if we have explicitly locked tables).
  */
#ifdef WITH_WSREP
  if (!(WSREP_EMULATE_BINLOG(this) || mysql_bin_log.is_open()))
#else
  if (!mysql_bin_log.is_open())
#endif
    DBUG_RETURN(0);

  /* Ensure that all events in a GTID group are in the same cache */
  if (variables.option_bits & OPTION_GTID_BEGIN)
    is_transactional= 1;

  /*
    Mark the event as the last event of a statement if the stmt_end
    flag is set.
  */
  int error= 0;
  if (Rows_log_event *pending= binlog_get_pending_rows_event(is_transactional))
  {
    if (stmt_end)
    {
      pending->set_flags(Rows_log_event::STMT_END_F);
      binlog_table_maps= 0;
    }

    error= mysql_bin_log.flush_and_set_pending_rows_event(this, 0,
                                                          is_transactional);
  }

  DBUG_RETURN(error);
}


#if !defined(DBUG_OFF) && !defined(_lint)
static const char *
show_query_type(THD::enum_binlog_query_type qtype)
{
  switch (qtype) {
  case THD::ROW_QUERY_TYPE:
    return "ROW";
  case THD::STMT_QUERY_TYPE:
    return "STMT";
  case THD::QUERY_TYPE_COUNT:
  default:
    DBUG_ASSERT(0 <= qtype && qtype < THD::QUERY_TYPE_COUNT);
  }
  static char buf[64];
  sprintf(buf, "UNKNOWN#%d", qtype);
  return buf;
}
#endif

/*
  Constants required for the limit unsafe warnings suppression
*/
//seconds after which the limit unsafe warnings suppression will be activated
#define LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT 50
//number of limit unsafe warnings after which the suppression will be activated
#define LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT 50

static ulonglong limit_unsafe_suppression_start_time= 0;
static bool unsafe_warning_suppression_is_activated= false;
static int limit_unsafe_warning_count= 0;
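
/*
  Overview of the suppression logic implemented in
  do_unsafe_limit_checkout() below (summary only; the inline comments
  there are the authoritative description):

  - INITIALIZING: the first suppressible LIMIT-unsafe warning starts the
    timer and is printed to the error log.
  - ACTIVATION: if LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT
    warnings arrive before the activation timeout elapses, further
    warnings are suppressed.
  - DEACTIVATION: once the timeout has passed, a note with the number of
    suppressed warnings is printed and monitoring restarts.
*/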

/**
  Auxiliary function to reset the limit unsafety warning suppression.
*/
static void reset_binlog_unsafe_suppression()
{
  DBUG_ENTER("reset_binlog_unsafe_suppression");
  unsafe_warning_suppression_is_activated= false;
  limit_unsafe_warning_count= 0;
  limit_unsafe_suppression_start_time= my_interval_timer()/10000000;
  DBUG_VOID_RETURN;
}

/**
  Auxiliary function to print a warning in the error log.
*/
static void print_unsafe_warning_to_log(int unsafe_type, char* buf,
                                        char* query)
{
  DBUG_ENTER("print_unsafe_warning_in_log");
  sprintf(buf, ER(ER_BINLOG_UNSAFE_STATEMENT),
          ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type]));
  sql_print_warning(ER(ER_MESSAGE_AND_STATEMENT), buf, query);
  DBUG_VOID_RETURN;
}

/**
  Auxiliary function to check if the warning for limit unsafety should be
  thrown or suppressed. Details of the implementation can be found in the
  comments inline.
  SYNOPSIS:
  @params
   buf         - buffer to hold the warning message text
   unsafe_type - The type of unsafety.
   query       - The actual query statement.

  TODO: Remove this function and implement a general service for all warnings
  that would prevent flooding the error log.
*/
static void do_unsafe_limit_checkout(char* buf, int unsafe_type, char* query)
{
  ulonglong now= 0;
  DBUG_ENTER("do_unsafe_limit_checkout");
  DBUG_ASSERT(unsafe_type == LEX::BINLOG_STMT_UNSAFE_LIMIT);
  limit_unsafe_warning_count++;
  /*
    INITIALIZING:
    If this is the first time this function is called with log warnings
    enabled, monitoring of the unsafe warnings should start.
  */
  if (limit_unsafe_suppression_start_time == 0)
  {
    limit_unsafe_suppression_start_time= my_interval_timer()/10000000;
    print_unsafe_warning_to_log(unsafe_type, buf, query);
  }
  else
  {
    if (!unsafe_warning_suppression_is_activated)
      print_unsafe_warning_to_log(unsafe_type, buf, query);

    if (limit_unsafe_warning_count >=
        LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT)
    {
      now= my_interval_timer()/10000000;
      if (!unsafe_warning_suppression_is_activated)
      {
        /*
          ACTIVATION:
          If we got LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT warnings
          in less than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT, we activate
          the suppression.
        */
        if ((now-limit_unsafe_suppression_start_time) <=
            LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT)
        {
          unsafe_warning_suppression_is_activated= true;
          DBUG_PRINT("info",("A warning flood has been detected and the limit \
unsafety warning suppression has been activated."));
        }
        else
        {
          /*
            There is no flooding so far, therefore we restart the monitoring.
          */
          limit_unsafe_suppression_start_time= my_interval_timer()/10000000;
          limit_unsafe_warning_count= 0;
        }
      }
      else
      {
        /*
          Print the suppression note and the unsafe warning.
        */
        sql_print_information("The following warning was suppressed %d times \
during the last %d seconds in the error log",
                              limit_unsafe_warning_count,
                              (int)
                              (now-limit_unsafe_suppression_start_time));
        print_unsafe_warning_to_log(unsafe_type, buf, query);
        /*
          DEACTIVATION: We got LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT
          warnings in more than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT, so the
          suppression should be deactivated.
        */
        if ((now - limit_unsafe_suppression_start_time) >
            LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT)
        {
          reset_binlog_unsafe_suppression();
          DBUG_PRINT("info",("The limit unsafety warning suppression has been \
deactivated"));
        }
      }
      limit_unsafe_warning_count= 0;
    }
  }
  DBUG_VOID_RETURN;
}

/**
  Auxiliary method used by @c binlog_query() to raise warnings.

  The type of warning and the type of unsafeness are stored in
  THD::binlog_unsafe_warning_flags.
*/
void THD::issue_unsafe_warnings()
{
  char buf[MYSQL_ERRMSG_SIZE * 2];
  DBUG_ENTER("issue_unsafe_warnings");
  /*
    Ensure that binlog_unsafe_warning_flags is big enough to hold all
    bits. This is actually a constant expression.
  */
  DBUG_ASSERT(LEX::BINLOG_STMT_UNSAFE_COUNT <=
              sizeof(binlog_unsafe_warning_flags) * CHAR_BIT);

  uint32 unsafe_type_flags= binlog_unsafe_warning_flags;
  /*
    For each unsafe_type, check if the statement is unsafe in this way
    and issue a warning.
  */
  for (int unsafe_type=0;
       unsafe_type < LEX::BINLOG_STMT_UNSAFE_COUNT;
       unsafe_type++)
  {
    if ((unsafe_type_flags & (1 << unsafe_type)) != 0)
    {
      push_warning_printf(this, Sql_condition::WARN_LEVEL_NOTE,
                          ER_BINLOG_UNSAFE_STATEMENT,
                          ER(ER_BINLOG_UNSAFE_STATEMENT),
                          ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type]));
      if (global_system_variables.log_warnings)
      {
        if (unsafe_type == LEX::BINLOG_STMT_UNSAFE_LIMIT)
          do_unsafe_limit_checkout(buf, unsafe_type, query());
        else //cases other than LIMIT unsafety
          print_unsafe_warning_to_log(unsafe_type, buf, query());
      }
    }
  }
  DBUG_VOID_RETURN;
}

/**
  Log the current query.

  The query will be logged in either row format or statement format
  depending on the value of the @c current_stmt_binlog_format_row field and
  the value of the @c qtype parameter.

  This function must be called:

  - After all calls to ha_*_row() functions have been issued.

  - After any writes to system tables. Rationale: if system tables
    were written after a call to this function, and the master crashes
    after the call to this function and before writing the system
    tables, then the master and slave get out of sync.

  - Before tables are unlocked and closed.

  @see decide_logging_format

  @retval 0 Success

  @retval nonzero If there is a failure when writing the query (e.g.,
    write failure), then the error code is returned.
*/
int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
                      ulong query_len, bool is_trans, bool direct,
                      bool suppress_use, int errcode)
{
  DBUG_ENTER("THD::binlog_query");
  DBUG_PRINT("enter", ("qtype: %s query: '%-.*s'",
                       show_query_type(qtype), (int) query_len, query_arg));
#ifdef WITH_WSREP
  DBUG_ASSERT(query_arg && (WSREP_EMULATE_BINLOG(this)
                            || mysql_bin_log.is_open()));
#else
  DBUG_ASSERT(query_arg && mysql_bin_log.is_open());
#endif

  /* If this is within a BEGIN ... COMMIT group, don't log it */
  if (variables.option_bits & OPTION_GTID_BEGIN)
  {
    direct= 0;
    is_trans= 1;
  }
  DBUG_PRINT("info", ("is_trans: %d direct: %d", is_trans, direct));

  if (get_binlog_local_stmt_filter() == BINLOG_FILTER_SET)
  {
    /*
      The current statement is to be ignored, and not written to
      the binlog. Do not call issue_unsafe_warnings().
    */
    DBUG_RETURN(0);
  }
  /*
    If we are not in prelocked mode, mysql_unlock_tables() will be
    called after this binlog_query(), so we have to flush the pending
    rows event with the STMT_END_F set to unlock all tables at the
    slave side as well.

    If we are in prelocked mode, the flushing will be done inside the
    top-most close_thread_tables().
  */
  if (this->locked_tables_mode <= LTM_LOCK_TABLES)
    if (int error= binlog_flush_pending_rows_event(TRUE, is_trans))
      DBUG_RETURN(error);

  /*
    Warnings for unsafe statements logged in statement format are
    printed in three places instead of in decide_logging_format().
    This is because the warnings should be printed only if the statement
    is actually logged. When executing decide_logging_format(), we cannot
    know for sure if the statement will be logged:

    1 - sp_head::execute_procedure which prints out warnings for calls to
    stored procedures.

    2 - sp_head::execute_function which prints out warnings for calls
    involving functions.

    3 - THD::binlog_query (here) which prints warnings for top-level
    statements not covered by the two cases above: i.e., if not inside a
    procedure or a function.

    Besides, we should not try to print these warnings if it is not
    possible to write statements to the binary log as it happens when
    the execution is inside a function, or generally speaking, when
    the variables.option_bits & OPTION_BIN_LOG is false.

  */
  if ((variables.option_bits & OPTION_BIN_LOG) &&
      spcont == NULL && !binlog_evt_union.do_union)
    issue_unsafe_warnings();

  switch (qtype) {
    /*
      ROW_QUERY_TYPE means that the statement may be logged either in
      row format or in statement format. If
      current_stmt_binlog_format is row, it means that the
      statement has already been logged in row format and hence shall
      not be logged again.
    */
  case THD::ROW_QUERY_TYPE:
    DBUG_PRINT("debug",
               ("is_current_stmt_binlog_format_row: %d",
                is_current_stmt_binlog_format_row()));
    if (is_current_stmt_binlog_format_row())
      DBUG_RETURN(0);
    /* Fall through */

    /*
      STMT_QUERY_TYPE means that the query must be logged in statement
      format; it cannot be logged in row format. This is typically
      used by DDL statements. It is an error to use this query type
      if current_stmt_binlog_format_row is row.

      @todo Currently there are places that call this method with
      STMT_QUERY_TYPE and current_stmt_binlog_format is row. Fix those
      places and add assert to ensure correct behavior. /Sven
    */
  case THD::STMT_QUERY_TYPE:
    /*
      The MYSQL_LOG::write() function will set the STMT_END_F flag and
      flush the pending rows event if necessary.
    */
    {
      Query_log_event qinfo(this, query_arg, query_len, is_trans, direct,
                            suppress_use, errcode);
      /*
        Binlog table maps will be irrelevant after a Query_log_event
        (they are just removed on the slave side) so after the query
        log event is written to the binary log, we pretend that no
        table maps were written.
      */
      int error= mysql_bin_log.write(&qinfo);
      binlog_table_maps= 0;
      DBUG_RETURN(error);
    }

  case THD::QUERY_TYPE_COUNT:
  default:
    DBUG_ASSERT(qtype < QUERY_TYPE_COUNT);
  }
  DBUG_RETURN(0);
}

void
THD::wait_for_wakeup_ready()
{
  mysql_mutex_lock(&LOCK_wakeup_ready);
  while (!wakeup_ready)
    mysql_cond_wait(&COND_wakeup_ready, &LOCK_wakeup_ready);
  mysql_mutex_unlock(&LOCK_wakeup_ready);
}

void
THD::signal_wakeup_ready()
{
  mysql_mutex_lock(&LOCK_wakeup_ready);
  wakeup_ready= true;
  mysql_mutex_unlock(&LOCK_wakeup_ready);
  mysql_cond_signal(&COND_wakeup_ready);
}


void THD::rgi_lock_temporary_tables()
{
  mysql_mutex_lock(&rgi_slave->rli->data_lock);
  temporary_tables= rgi_slave->rli->save_temporary_tables;
}

void THD::rgi_unlock_temporary_tables()
{
  rgi_slave->rli->save_temporary_tables= temporary_tables;
  mysql_mutex_unlock(&rgi_slave->rli->data_lock);
}

bool THD::rgi_have_temporary_tables()
{
  return rgi_slave->rli->save_temporary_tables != 0;
}


void
wait_for_commit::reinit()
{
  subsequent_commits_list= NULL;
  next_subsequent_commit= NULL;
  waitee= NULL;
  opaque_pointer= NULL;
  wakeup_error= 0;
  wakeup_subsequent_commits_running= false;
}


wait_for_commit::wait_for_commit()
{
  mysql_mutex_init(key_LOCK_wait_commit, &LOCK_wait_commit, MY_MUTEX_INIT_FAST);
  mysql_cond_init(key_COND_wait_commit, &COND_wait_commit, 0);
  reinit();
}


wait_for_commit::~wait_for_commit()
{
  /*
    Since we do a dirty read of the waiting_for_commit flag in
    wait_for_prior_commit() and in unregister_wait_for_prior_commit(), we need
    to take extra care before freeing the wait_for_commit object.

    It is possible for the waitee to be pre-empted inside wakeup(), just after
    it has cleared the waiting_for_commit flag and before it has released the
    LOCK_wait_commit mutex. And then it is possible for the waiter to find the
    flag cleared in wait_for_prior_commit() and go finish up things and
    de-allocate the LOCK_wait_commit and COND_wait_commit objects before the
    waitee has time to be re-scheduled and finish unlocking the mutex and
    signalling the condition. This would lead to the waitee accessing no
    longer valid memory.

    To prevent this, we do an extra lock/unlock of the mutex here before
    deallocation; this makes certain that any waitee has completed wakeup()
    first.
  */
  mysql_mutex_lock(&LOCK_wait_commit);
  mysql_mutex_unlock(&LOCK_wait_commit);

  mysql_mutex_destroy(&LOCK_wait_commit);
  mysql_cond_destroy(&COND_wait_commit);
}


void
wait_for_commit::wakeup(int wakeup_error)
{
  /*
    We signal each waiter on their own condition and mutex (rather than using
    pthread_cond_broadcast() or something like that).

    Otherwise we would need to somehow ensure that they were done
    waking up before we could allow this THD to be destroyed, which would
    be annoying and unnecessary.

    Note that wakeup_subsequent_commits2() depends on this function being a
    full memory barrier (it is, because it takes a mutex lock).

  */
  mysql_mutex_lock(&LOCK_wait_commit);
  waitee= NULL;
  this->wakeup_error= wakeup_error;
  /*
    Note that it is critical that the mysql_cond_signal() here is done while
    still holding the mutex. As soon as we release the mutex, the waiter might
    deallocate the condition object.
  */
  mysql_cond_signal(&COND_wait_commit);
  mysql_mutex_unlock(&LOCK_wait_commit);
}


/*
  Register that the next commit of this THD should wait to complete until
  commit in another THD (the waitee) has completed.

  The wait may occur explicitly, with the waiter sitting in
  wait_for_prior_commit() until the waitee calls wakeup_subsequent_commits().

  Alternatively, the TC (eg. binlog) may do the commits of both waitee and
  waiter at once during group commit, resolving both of them in the right
  order.

  Only one waitee can be registered for a waiter; it must be removed by
  wait_for_prior_commit() or unregister_wait_for_prior_commit() before a new
  one is registered. But it is ok for several waiters to register a wait for
  the same waitee. It is also permissible for one THD to be both a waiter and
  a waitee at the same time.
*/
void
wait_for_commit::register_wait_for_prior_commit(wait_for_commit *waitee)
{
  DBUG_ASSERT(!this->waitee /* No prior registration allowed */);
  wakeup_error= 0;
  this->waitee= waitee;

  mysql_mutex_lock(&waitee->LOCK_wait_commit);
  /*
    If waitee is in the middle of wakeup, then there is nothing to wait for,
    so we need not register. This is necessary to avoid a race in unregister,
    see comments on wakeup_subsequent_commits2() for details.
  */
  if (waitee->wakeup_subsequent_commits_running)
    this->waitee= NULL;
  else
  {
    /*
      Put ourself at the head of the waitee's list of transactions that must
      wait for it to commit first.
    */
    this->next_subsequent_commit= waitee->subsequent_commits_list;
    waitee->subsequent_commits_list= this;
  }
  mysql_mutex_unlock(&waitee->LOCK_wait_commit);
}
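
/*
  Typical waiter-side sequence (illustrative sketch only; the methods
  register_wait_for_prior_commit(), wait_for_prior_commit2() and
  unregister_wait_for_prior_commit2() are the ones defined in this file,
  the surrounding names are hypothetical):

    wfc->register_wait_for_prior_commit(waitee_wfc);
    ...
    if (wfc->wait_for_prior_commit2(thd))   // blocks until the waitee commits
      return error;                         // e.g. ER_PRIOR_COMMIT_FAILED
    // or, to abandon the registration instead of waiting:
    //   wfc->unregister_wait_for_prior_commit2();
*/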


/*
  Wait for commit of another transaction to complete, as already registered
  with register_wait_for_prior_commit(). If the commit already completed,
  returns immediately.
*/
int
wait_for_commit::wait_for_prior_commit2(THD *thd)
{
  PSI_stage_info old_stage;
  wait_for_commit *loc_waitee;

  mysql_mutex_lock(&LOCK_wait_commit);
  DEBUG_SYNC(thd, "wait_for_prior_commit_waiting");
  thd->ENTER_COND(&COND_wait_commit, &LOCK_wait_commit,
                  &stage_waiting_for_prior_transaction_to_commit,
                  &old_stage);
  while ((loc_waitee= this->waitee) && !thd->check_killed())
    mysql_cond_wait(&COND_wait_commit, &LOCK_wait_commit);
  if (!loc_waitee)
  {
    if (wakeup_error)
      my_error(ER_PRIOR_COMMIT_FAILED, MYF(0));
    goto end;
  }
  /*
    Wait was interrupted by kill. We need to unregister our wait and give the
    error. But if a wakeup is already in progress, then we must ignore the
    kill and not give error, otherwise we get inconsistency between waitee and
    waiter as to whether we succeed or fail (eg. we may roll back but waitee
    might attempt to commit both us and any subsequent commits waiting for us).
  */
  mysql_mutex_lock(&loc_waitee->LOCK_wait_commit);
  if (loc_waitee->wakeup_subsequent_commits_running)
  {
    /* We are being woken up; ignore the kill and just wait. */
    mysql_mutex_unlock(&loc_waitee->LOCK_wait_commit);
    do
    {
      mysql_cond_wait(&COND_wait_commit, &LOCK_wait_commit);
    } while (this->waitee);
    if (wakeup_error)
      my_error(ER_PRIOR_COMMIT_FAILED, MYF(0));
    goto end;
  }
  remove_from_list(&loc_waitee->subsequent_commits_list);
  mysql_mutex_unlock(&loc_waitee->LOCK_wait_commit);
  this->waitee= NULL;

  wakeup_error= thd->killed_errno();
  if (!wakeup_error)
    wakeup_error= ER_QUERY_INTERRUPTED;
  my_message(wakeup_error, ER(wakeup_error), MYF(0));
  thd->EXIT_COND(&old_stage);
  /*
    Must do the DEBUG_SYNC() _after_ exit_cond(), as DEBUG_SYNC is not safe to
    use within enter_cond/exit_cond.
  */
  DEBUG_SYNC(thd, "wait_for_prior_commit_killed");
  return wakeup_error;

end:
  thd->EXIT_COND(&old_stage);
  return wakeup_error;
}


/*
  Wakeup anyone waiting for us to have committed.

  Note about locking:

  We have a potential race or deadlock between wakeup_subsequent_commits() in
  the waitee and unregister_wait_for_prior_commit() in the waiter.

  Both waiter and waitee need to take their own lock before it is safe to take
  a lock on the other party - else the other party might disappear and invalid
  memory data could be accessed. But if we take the two locks in different
  order, we may end up in a deadlock.

  The waiter needs to lock the waitee to delete itself from the list in
  unregister_wait_for_prior_commit(). Thus wakeup_subsequent_commits() can not
  hold its own lock while locking waiters, as this could lead to deadlock.

  So we need to prevent unregister_wait_for_prior_commit() running while wakeup
  is in progress - otherwise the unregister could complete before the wakeup,
  leading to incorrect spurious wakeup or accessing invalid memory.

  However, if we are in the middle of running wakeup_subsequent_commits(), then
  there is no need for unregister_wait_for_prior_commit() in the first place -
  the waiter can just do a normal wait_for_prior_commit(), as it will be
  immediately woken up.

  So the solution to the potential race/deadlock is to set a flag in the waitee
  that wakeup_subsequent_commits() is in progress. When this flag is set,
  unregister_wait_for_prior_commit() becomes just wait_for_prior_commit().

  Then also register_wait_for_prior_commit() needs to check if
  wakeup_subsequent_commits() is running, and skip the registration if
  so. This is needed in case a new waiter manages to register itself and then
  immediately tries to unregister while wakeup_subsequent_commits() is
  running. Else the new waiter would also wait rather than unregister, but it
  would not be woken up until the next wakeup, which could be potentially much
  later than necessary.
*/

void
wait_for_commit::wakeup_subsequent_commits2(int wakeup_error)
{
  wait_for_commit *waiter;

  mysql_mutex_lock(&LOCK_wait_commit);
  wakeup_subsequent_commits_running= true;
  waiter= subsequent_commits_list;
  subsequent_commits_list= NULL;
  mysql_mutex_unlock(&LOCK_wait_commit);

  while (waiter)
  {
    /*
      Important: we must grab the next pointer before waking up the waiter;
      once the wakeup is done, the field could be invalidated at any time.
    */
    wait_for_commit *next= waiter->next_subsequent_commit;
    waiter->wakeup(wakeup_error);
    waiter= next;
  }

  /*
    We need a full memory barrier between walking the list above, and clearing
    the flag wakeup_subsequent_commits_running below. This barrier is needed
    to ensure that no other thread will start to modify the list pointers
    before we are done traversing the list.

    But wait_for_commit::wakeup() does a full memory barrier already (it locks
    a mutex), so no extra explicit barrier is needed here.
  */
  wakeup_subsequent_commits_running= false;
}


/* Cancel a previously registered wait for another THD to commit before us. */
void
wait_for_commit::unregister_wait_for_prior_commit2()
{
  wait_for_commit *loc_waitee;

  mysql_mutex_lock(&LOCK_wait_commit);
  if ((loc_waitee= this->waitee))
  {
    mysql_mutex_lock(&loc_waitee->LOCK_wait_commit);
    if (loc_waitee->wakeup_subsequent_commits_running)
    {
      /*
        When a wakeup is running, we cannot safely remove ourselves from the
        list without corrupting it. Instead we can just wait, as wakeup is
        already in progress and will thus be immediate.

        See comments on wakeup_subsequent_commits2() for more details.
      */
      mysql_mutex_unlock(&loc_waitee->LOCK_wait_commit);
      while (this->waitee)
        mysql_cond_wait(&COND_wait_commit, &LOCK_wait_commit);
    }
    else
    {
      /* Remove ourselves from the list in the waitee. */
      remove_from_list(&loc_waitee->subsequent_commits_list);
      mysql_mutex_unlock(&loc_waitee->LOCK_wait_commit);
      this->waitee= NULL;
    }
  }
  mysql_mutex_unlock(&LOCK_wait_commit);
}


bool Discrete_intervals_list::append(ulonglong start, ulonglong val,
                                     ulonglong incr)
{
  DBUG_ENTER("Discrete_intervals_list::append");
  /* first, see if this can be merged with previous */
  if ((head == NULL) || tail->merge_if_contiguous(start, val, incr))
  {
    /* it cannot, so need to add a new interval */
    Discrete_interval *new_interval= new Discrete_interval(start, val, incr);
    DBUG_RETURN(append(new_interval));
  }
  DBUG_RETURN(0);
}
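
/*
  Illustrative example (hypothetical values): after appending
  (start=1, val=10, incr=1) to an empty list, a later append of
  (start=11, val=5, incr=1) starts exactly where the previous interval
  ends with the same increment, so merge_if_contiguous() extends the tail
  interval in place; a non-contiguous append instead takes the branch above
  that allocates a new Discrete_interval.
*/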

bool Discrete_intervals_list::append(Discrete_interval *new_interval)
{
  DBUG_ENTER("Discrete_intervals_list::append");
  if (unlikely(new_interval == NULL))
    DBUG_RETURN(1);
  DBUG_PRINT("info",("adding new auto_increment interval"));
  if (head == NULL)
    head= current= new_interval;
  else
    tail->next= new_interval;
  tail= new_interval;
  elements++;
  DBUG_RETURN(0);
}

#endif /* !defined(MYSQL_CLIENT) */