Mirror of https://github.com/MariaDB/server.git
Commit 76bcea3154: Merge branch '10.9' into 10.10
140 changed files with 4096 additions and 705 deletions
CREDITS
client
cmake
dbug
extra
include
mysql-test/include
mysql-test/main: cte_recursive.result, cte_recursive.test, ctype_utf8.result, ctype_utf8.test, derived_cond_pushdown.result, derived_cond_pushdown.test, long_unique_bugs.result, long_unique_bugs.test, rowid_filter.result, select.result, select.test, select_jcl6.result, select_pkeycache.result, subselect2.result, type_timestamp.result, type_timestamp.test, win.result, win.test, win_orderby.result, win_orderby.test
mysql-test/std_data/mysql_upgrade
mysql-test/suite: binlog, encryption, galera, mariabackup, plugins, rpl
plugin/type_uuid
sql: CMakeLists.txt, discover.h, field.cc, field.h, ha_partition.cc, handler.cc, handler.h, item.cc, item.h, item_cmpfunc.cc, item_cmpfunc.h, item_func.cc, item_func.h, item_strfunc.cc, item_strfunc.h, item_sum.h, log.cc, mysql_install_db.cc, mysqld.cc, rpl_parallel.cc, rpl_rli.cc, slave.cc, slave.h, sql_admin.cc, sql_alter.cc, sql_class.cc, sql_class.h, sql_cte.cc, sql_cte.h, sql_insert.cc, sql_lex.cc, sql_lex.h, sql_list.h, sql_parse.cc, sql_plugin.cc
CREDITS (+1)
@ -9,6 +9,7 @@ MariaDB Corporation https://www.mariadb.com (2013)
Microsoft https://microsoft.com/ (2017)
ServiceNow https://servicenow.com (2019)
SIT https://sit.org (2022)
Tencent Cloud https://cloud.tencent.com (2017)
Development Bank of Singapore https://dbs.com (2016)
IBM https://www.ibm.com (2017)
Automattic https://automattic.com (2019)

@ -3798,7 +3798,10 @@ print_table_data(MYSQL_RES *result)
|
|||
{
|
||||
print_field_types(result);
|
||||
if (!mysql_num_rows(result))
|
||||
{
|
||||
my_afree((uchar*) num_flag);
|
||||
return;
|
||||
}
|
||||
mysql_field_seek(result,0);
|
||||
}
|
||||
separator.copy("+",1,charset_info);
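The hunk above appears to release num_flag on the early-return path taken when the result set has no rows, so the temporary per-column flag array is not leaked. A generic standalone sketch of that pattern with placeholder names (the real client code pairs my_alloca() with my_afree(); this is not the mysql client API):

#include <stdio.h>
#include <stdlib.h>

/* Placeholder names; the real code frees num_flag with my_afree(). */
static void print_rows(size_t num_rows, size_t num_fields)
{
  char *num_flag= malloc(num_fields);   /* per-column "is numeric" flags */
  if (!num_flag)
    return;
  if (num_rows == 0)
  {
    free(num_flag);                     /* release on the early return too */
    return;
  }
  /* ... format headers and rows here ... */
  free(num_flag);
}

int main(void)
{
  print_rows(0, 4);
  return 0;
}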
|
||||
|
|
|
@ -569,14 +569,14 @@ static int file_exists(char * filename)
|
|||
@retval int error = 1, success = 0
|
||||
*/
|
||||
|
||||
static int search_dir(const char * base_path, const char *tool_name,
|
||||
static int search_dir(const char *base_path, const char *tool_name,
|
||||
const char *subdir, char *tool_path)
|
||||
{
|
||||
char new_path[FN_REFLEN];
|
||||
char source_path[FN_REFLEN];
|
||||
|
||||
strcpy(source_path, base_path);
|
||||
strcat(source_path, subdir);
|
||||
safe_strcpy(source_path, sizeof(source_path), base_path);
|
||||
safe_strcat(source_path, sizeof(source_path), subdir);
|
||||
fn_format(new_path, tool_name, source_path, "", MY_UNPACK_FILENAME);
|
||||
if (file_exists(new_path))
|
||||
{
|
||||
|
@ -632,7 +632,7 @@ static int load_plugin_data(char *plugin_name, char *config_file)
|
|||
FILE *file_ptr;
|
||||
char path[FN_REFLEN];
|
||||
char line[1024];
|
||||
char *reason= 0;
|
||||
const char *reason= 0;
|
||||
char *res;
|
||||
int i= -1;
|
||||
|
||||
|
@ -643,14 +643,14 @@ static int load_plugin_data(char *plugin_name, char *config_file)
|
|||
}
|
||||
if (!file_exists(opt_plugin_ini))
|
||||
{
|
||||
reason= (char *)"File does not exist.";
|
||||
reason= "File does not exist.";
|
||||
goto error;
|
||||
}
|
||||
|
||||
file_ptr= fopen(opt_plugin_ini, "r");
|
||||
if (file_ptr == NULL)
|
||||
{
|
||||
reason= (char *)"Cannot open file.";
|
||||
reason= "Cannot open file.";
|
||||
goto error;
|
||||
}
|
||||
|
||||
|
@ -660,17 +660,20 @@ static int load_plugin_data(char *plugin_name, char *config_file)
|
|||
/* Read plugin components */
|
||||
while (i < 16)
|
||||
{
|
||||
size_t line_len;
|
||||
|
||||
res= fgets(line, sizeof(line), file_ptr);
|
||||
line_len= strlen(line);
|
||||
|
||||
/* strip '\n' */
|
||||
if (line[strlen(line)-1] == '\n')
|
||||
{
|
||||
line[strlen(line)-1]= '\0';
|
||||
}
|
||||
if (line[line_len - 1] == '\n')
|
||||
line[line_len - 1]= '\0';
|
||||
|
||||
if (res == NULL)
|
||||
{
|
||||
if (i < 1)
|
||||
{
|
||||
reason= (char *)"Bad format in plugin configuration file.";
|
||||
reason= "Bad format in plugin configuration file.";
|
||||
fclose(file_ptr);
|
||||
goto error;
|
||||
}
|
||||
|
@ -683,14 +686,19 @@ static int load_plugin_data(char *plugin_name, char *config_file)
|
|||
if (i == -1) /* if first pass, read this line as so_name */
|
||||
{
|
||||
/* Add proper file extension for soname */
|
||||
strcat(line, FN_SOEXT);
|
||||
if (safe_strcpy(line + line_len - 1, sizeof(line), FN_SOEXT))
|
||||
{
|
||||
reason= "Plugin name too long.";
|
||||
fclose(file_ptr);
|
||||
goto error;
|
||||
}
|
||||
/* save so_name */
|
||||
plugin_data.so_name= my_strdup(PSI_NOT_INSTRUMENTED, line, MYF(MY_WME|MY_ZEROFILL));
|
||||
i++;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (strlen(line) > 0)
|
||||
if (line_len > 0)
|
||||
{
|
||||
plugin_data.components[i]= my_strdup(PSI_NOT_INSTRUMENTED, line, MYF(MY_WME));
|
||||
i++;
|
||||
|
@ -779,14 +787,13 @@ static int check_options(int argc, char **argv, char *operation)
|
|||
/* read the plugin config file and check for match against argument */
|
||||
else
|
||||
{
|
||||
if (strlen(argv[i]) + 4 + 1 > FN_REFLEN)
|
||||
if (safe_strcpy(plugin_name, sizeof(plugin_name), argv[i]) ||
|
||||
safe_strcpy(config_file, sizeof(config_file), argv[i]) ||
|
||||
safe_strcat(config_file, sizeof(config_file), ".ini"))
|
||||
{
|
||||
fprintf(stderr, "ERROR: argument is too long.\n");
|
||||
return 1;
|
||||
}
|
||||
strcpy(plugin_name, argv[i]);
|
||||
strcpy(config_file, argv[i]);
|
||||
strcat(config_file, ".ini");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -855,35 +862,30 @@ static int check_options(int argc, char **argv, char *operation)
|
|||
static int process_options(int argc, char *argv[], char *operation)
|
||||
{
|
||||
int error= 0;
|
||||
int i= 0;
|
||||
|
||||
/* Parse and execute command-line options */
|
||||
if ((error= handle_options(&argc, &argv, my_long_options, get_one_option)))
|
||||
goto exit;
|
||||
return error;
|
||||
|
||||
/* If the print defaults option used, exit. */
|
||||
if (opt_print_defaults)
|
||||
{
|
||||
error= -1;
|
||||
goto exit;
|
||||
}
|
||||
return -1;
|
||||
|
||||
/* Add a trailing directory separator if not present */
|
||||
if (opt_basedir)
|
||||
{
|
||||
i= (int)strlength(opt_basedir);
|
||||
if (opt_basedir[i-1] != FN_LIBCHAR || opt_basedir[i-1] != FN_LIBCHAR2)
|
||||
size_t basedir_len= strlength(opt_basedir);
|
||||
if (opt_basedir[basedir_len - 1] != FN_LIBCHAR ||
|
||||
opt_basedir[basedir_len - 1] != FN_LIBCHAR2)
|
||||
{
|
||||
char buff[FN_REFLEN];
|
||||
memset(buff, 0, sizeof(buff));
|
||||
|
||||
strncpy(buff, opt_basedir, sizeof(buff) - 1);
|
||||
#ifdef _WIN32
|
||||
strncat(buff, "/", sizeof(buff) - strlen(buff) - 1);
|
||||
#else
|
||||
strncat(buff, FN_DIRSEP, sizeof(buff) - strlen(buff) - 1);
|
||||
#endif
|
||||
buff[sizeof(buff) - 1]= 0;
|
||||
if (basedir_len + 2 > FN_REFLEN)
|
||||
return -1;
|
||||
|
||||
memcpy(buff, opt_basedir, basedir_len);
|
||||
buff[basedir_len]= '/';
|
||||
buff[basedir_len + 1]= '\0';
|
||||
|
||||
my_free(opt_basedir);
|
||||
opt_basedir= my_strdup(PSI_NOT_INSTRUMENTED, buff, MYF(MY_FAE));
|
||||
}
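The replacement logic can be read in isolation as: measure once, bounds-check once, then append the separator by index instead of with strncat(). A rough standalone sketch of that shape follows; the fixed buffer size and names are illustrative only, not the server's FN_REFLEN plumbing.

#include <stdio.h>
#include <string.h>

#define BUF_LEN 32   /* stands in for FN_REFLEN in this illustration */

/* Return 0 and write "path/" into out, or -1 if it would not fit. */
static int with_trailing_slash(char out[BUF_LEN], const char *path)
{
  size_t len= strlen(path);
  if (len + 2 > BUF_LEN)           /* room for '/' and '\0' */
    return -1;
  memcpy(out, path, len);          /* length is already known, no rescanning */
  out[len]= '/';
  out[len + 1]= '\0';
  return 0;
}

int main(void)
{
  char buff[BUF_LEN];
  if (with_trailing_slash(buff, "/usr/local/mysql") == 0)
    printf("%s\n", buff);          /* prints "/usr/local/mysql/" */
  return 0;
}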
|
||||
|
@ -895,10 +897,7 @@ static int process_options(int argc, char *argv[], char *operation)
|
|||
generated when the defaults were read from the file, exit.
|
||||
*/
|
||||
if (!opt_no_defaults && ((error= get_default_values())))
|
||||
{
|
||||
error= -1;
|
||||
goto exit;
|
||||
}
|
||||
return -1;
|
||||
|
||||
/*
|
||||
Check to ensure required options are present and validate the operation.
|
||||
|
@ -906,11 +905,9 @@ static int process_options(int argc, char *argv[], char *operation)
|
|||
read a configuration file named <plugin_name>.ini from the --plugin-dir
|
||||
or --plugin-ini location if the --plugin-ini option presented.
|
||||
*/
|
||||
strcpy(operation, "");
|
||||
if ((error = check_options(argc, argv, operation)))
|
||||
{
|
||||
goto exit;
|
||||
}
|
||||
operation[0]= '\0';
|
||||
if ((error= check_options(argc, argv, operation)))
|
||||
return error;
|
||||
|
||||
if (opt_verbose)
|
||||
{
|
||||
|
@ -922,8 +919,7 @@ static int process_options(int argc, char *argv[], char *operation)
|
|||
printf("# lc_messages_dir = %s\n", opt_lc_messages_dir);
|
||||
}
|
||||
|
||||
exit:
|
||||
return error;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -2604,7 +2604,7 @@ static uint dump_events_for_db(char *db)
|
|||
if (mysql_query_with_error_report(mysql, &event_list_res, "show events"))
|
||||
DBUG_RETURN(0);
|
||||
|
||||
strcpy(delimiter, ";");
|
||||
safe_strcpy(delimiter, sizeof(delimiter), ";");
|
||||
if (mysql_num_rows(event_list_res) > 0)
|
||||
{
|
||||
if (opt_xml)
|
||||
|
|
|
@ -6228,7 +6228,9 @@ int do_done(struct st_command *command)
|
|||
if (*cur_block->delim)
|
||||
{
|
||||
/* Restore "old" delimiter after false if block */
|
||||
strcpy (delimiter, cur_block->delim);
|
||||
if (safe_strcpy(delimiter, sizeof(delimiter), cur_block->delim))
|
||||
die("Delimiter too long, truncated");
|
||||
|
||||
delimiter_length= strlen(delimiter);
|
||||
}
|
||||
/* Pop block from stack, goto next line */
|
||||
|
@ -6483,10 +6485,12 @@ void do_block(enum block_cmd cmd, struct st_command* command)
|
|||
if (cur_block->ok)
|
||||
{
|
||||
cur_block->delim[0]= '\0';
|
||||
} else
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Remember "old" delimiter if entering a false if block */
|
||||
strcpy (cur_block->delim, delimiter);
|
||||
if (safe_strcpy(cur_block->delim, sizeof(cur_block->delim), delimiter))
|
||||
die("Delimiter too long, truncated");
|
||||
}
|
||||
|
||||
DBUG_PRINT("info", ("OK: %d", cur_block->ok));
|
||||
|
@ -11888,9 +11892,8 @@ static int setenv(const char *name, const char *value, int overwrite)
|
|||
char *envvar= (char *)malloc(buflen);
|
||||
if(!envvar)
|
||||
return ENOMEM;
|
||||
strcpy(envvar, name);
|
||||
strcat(envvar, "=");
|
||||
strcat(envvar, value);
|
||||
|
||||
snprintf(envvar, buflen, "%s=%s", name, value);
|
||||
putenv(envvar);
|
||||
return 0;
|
||||
}
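As a standalone illustration of the same pattern, the sketch below builds the NAME=value string with one bounded snprintf() instead of a strcpy()/strcat() chain; the function and variable names here are placeholders, not the exact mysqltest code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder fallback for platforms without setenv(); mirrors the hunk above. */
static int setenv_compat(const char *name, const char *value)
{
  size_t buflen= strlen(name) + strlen(value) + 2;   /* '=' plus '\0' */
  char *envvar= malloc(buflen);
  if (!envvar)
    return ENOMEM;
  /* One bounded, format-checked write instead of strcpy() plus two strcat()s. */
  snprintf(envvar, buflen, "%s=%s", name, value);
  return putenv(envvar);   /* envvar is intentionally not freed: the
                              environment keeps pointing at it */
}

int main(void)
{
  setenv_compat("MTR_EXAMPLE_VAR", "42");
  const char *v= getenv("MTR_EXAMPLE_VAR");
  printf("%s\n", v ? v : "(unset)");                 /* prints "42" */
  return 0;
}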
|
||||
|
|
|
@ -179,8 +179,8 @@ IF(WIN32)
|
|||
OPTION(SIGNCODE "Sign executables and dlls with digital certificate" OFF)
|
||||
MARK_AS_ADVANCED(SIGNCODE)
|
||||
IF(SIGNCODE)
|
||||
SET(SIGNTOOL_PARAMETERS
|
||||
/a /t http://timestamp.globalsign.com/?signature=sha2
|
||||
SET(SIGNTOOL_PARAMETERS
|
||||
/a /fd SHA256 /t http://timestamp.globalsign.com/?signature=sha2
|
||||
CACHE STRING "parameters for signtool (list)")
|
||||
IF(NOT SIGNTOOL_EXECUTABLE)
|
||||
FILE(GLOB path_list
|
||||
|
|
|
@ -511,7 +511,7 @@ static int DbugParse(CODE_STATE *cs, const char *control)
|
|||
stack->delay= stack->next->delay;
|
||||
stack->maxdepth= stack->next->maxdepth;
|
||||
stack->sub_level= stack->next->sub_level;
|
||||
strcpy(stack->name, stack->next->name);
|
||||
safe_strcpy(stack->name, sizeof(stack->name), stack->next->name);
|
||||
stack->out_file= stack->next->out_file;
|
||||
stack->out_file->used++;
|
||||
if (stack->next == &init_settings)
|
||||
|
|
|
@ -771,7 +771,7 @@ parse_page(
|
|||
{
|
||||
unsigned long long id;
|
||||
uint16_t undo_page_type;
|
||||
char str[20]={'\0'};
|
||||
const char *str;
|
||||
ulint n_recs;
|
||||
uint32_t page_no, left_page_no, right_page_no;
|
||||
ulint data_bytes;
|
||||
|
@ -779,11 +779,7 @@ parse_page(
|
|||
ulint size_range_id;
|
||||
|
||||
/* Check whether page is doublewrite buffer. */
|
||||
if(skip_page) {
|
||||
strcpy(str, "Double_write_buffer");
|
||||
} else {
|
||||
strcpy(str, "-");
|
||||
}
|
||||
str = skip_page ? "Double_write_buffer" : "-";
|
||||
|
||||
switch (fil_page_get_type(page)) {
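The change replaces a 20-byte stack buffer filled with strcpy() by a const pointer that simply aims at one of two string literals, so no copy (and no buffer-size question) is involved. A tiny sketch of the idiom, with made-up names:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: point at string literals instead of copying them. */
static const char *page_label(bool skip_page)
{
  return skip_page ? "Double_write_buffer" : "-";
}

int main(void)
{
  printf("%s\n", page_label(true));   /* prints "Double_write_buffer" */
  printf("%s\n", page_label(false));  /* prints "-" */
  return 0;
}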
|
||||
|
||||
|
|
|
@ -58,6 +58,9 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
|
|||
#include "backup_debug.h"
|
||||
#include "backup_mysql.h"
|
||||
#include <btr0btr.h>
|
||||
#ifdef _WIN32
|
||||
#include <direct.h> /* rmdir */
|
||||
#endif
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <aclapi.h>
|
||||
|
@ -1562,7 +1565,49 @@ bool backup_finish()
|
|||
return(true);
|
||||
}
|
||||
|
||||
bool
|
||||
|
||||
/*
|
||||
Drop all empty database directories in the base backup
|
||||
that do not exist in the incremental backup.
|
||||
|
||||
This effectively replays all DROP DATABASE statements that happened
between the time the base backup and the incremental backup were taken.
|
||||
|
||||
Note, only checking if base_dir/db/ is empty is not enough,
|
||||
because inc_dir/db/db.opt might have been dropped for some reason,
which may also result in an empty base_dir/db/.
|
||||
|
||||
Only the fact that at the same time:
|
||||
- base_dir/db/ exists
|
||||
- inc_dir/db/ does not exist
|
||||
means that DROP DATABASE happened.
|
||||
*/
|
||||
static void
|
||||
ibx_incremental_drop_databases(const char *base_dir,
|
||||
const char *inc_dir)
|
||||
{
|
||||
datadir_node_t node;
|
||||
datadir_node_init(&node);
|
||||
datadir_iter_t *it = datadir_iter_new(base_dir);
|
||||
|
||||
while (datadir_iter_next(it, &node)) {
|
||||
if (node.is_empty_dir) {
|
||||
char path[FN_REFLEN];
|
||||
snprintf(path, sizeof(path), "%s/%s",
|
||||
inc_dir, node.filepath_rel);
|
||||
if (!directory_exists(path, false)) {
|
||||
msg("Removing %s", node.filepath);
|
||||
rmdir(node.filepath);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
datadir_iter_free(it);
|
||||
datadir_node_free(&node);
|
||||
}
|
||||
|
||||
|
||||
static bool
|
||||
ibx_copy_incremental_over_full()
|
||||
{
|
||||
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
|
||||
|
@ -1645,6 +1690,8 @@ ibx_copy_incremental_over_full()
|
|||
}
|
||||
copy_or_move_dir(path, ROCKSDB_BACKUP_DIR, true, true);
|
||||
}
|
||||
ibx_incremental_drop_databases(xtrabackup_target_dir,
|
||||
xtrabackup_incremental_dir);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -1676,8 +1676,11 @@ container_list_add_object(container_list *list, const char *name,
|
|||
list->object_count += object_count_step;
|
||||
}
|
||||
assert(list->idx <= list->object_count);
|
||||
strcpy(list->objects[list->idx].name, name);
|
||||
strcpy(list->objects[list->idx].hash, hash);
|
||||
safe_strcpy(list->objects[list->idx].name,
|
||||
sizeof(list->objects[list->idx].name), name);
|
||||
safe_strcpy(list->objects[list->idx].hash,
|
||||
sizeof(list->objects[list->idx].hash), hash);
|
||||
|
||||
list->objects[list->idx].bytes = bytes;
|
||||
++list->idx;
|
||||
}
|
||||
|
|
|
@ -4515,11 +4515,13 @@ static bool xtrabackup_backup_low()
|
|||
return false;
|
||||
}
|
||||
|
||||
if(!xtrabackup_incremental) {
|
||||
strcpy(metadata_type, "full-backuped");
|
||||
if (!xtrabackup_incremental) {
|
||||
safe_strcpy(metadata_type, sizeof(metadata_type),
|
||||
"full-backuped");
|
||||
metadata_from_lsn = 0;
|
||||
} else {
|
||||
strcpy(metadata_type, "incremental");
|
||||
safe_strcpy(metadata_type, sizeof(metadata_type),
|
||||
"incremental");
|
||||
metadata_from_lsn = incremental_lsn;
|
||||
}
|
||||
metadata_last_lsn = recv_sys.lsn;
|
||||
|
@ -6109,7 +6111,8 @@ error:
|
|||
if (ok) {
|
||||
char filename[FN_REFLEN];
|
||||
|
||||
strcpy(metadata_type, "log-applied");
|
||||
safe_strcpy(metadata_type, sizeof(metadata_type),
|
||||
"log-applied");
|
||||
|
||||
if(xtrabackup_incremental
|
||||
&& metadata_to_lsn < incremental_to_lsn)
|
||||
|
|
|
@ -226,6 +226,44 @@ static inline void lex_string_set3(LEX_CSTRING *lex_str, const char *c_str,
|
|||
lex_str->length= len;
|
||||
}
|
||||
|
||||
/*
|
||||
Copies src into dst and ensures dst is a NULL terminated C string.
|
||||
|
||||
Returns 1 if the src string was truncated due to too small size of dst.
|
||||
Returns 0 if src completely fit within dst. Pads the remaining dst with '\0'
|
||||
|
||||
Note: dst_size must be > 0
|
||||
*/
|
||||
static inline int safe_strcpy(char *dst, size_t dst_size, const char *src)
|
||||
{
|
||||
memset(dst, '\0', dst_size);
|
||||
strncpy(dst, src, dst_size - 1);
|
||||
/*
|
||||
If the first condition is true, we are guaranteed to have src length
|
||||
>= (dst_size - 1), hence safe to access src[dst_size - 1].
|
||||
*/
|
||||
if (dst[dst_size - 2] != '\0' && src[dst_size - 1] != '\0')
|
||||
return 1; /* Truncation of src. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Appends src to dst and ensures dst is a NULL terminated C string.
|
||||
|
||||
Returns 1 if the src string was truncated due to too small size of dst.
|
||||
Returns 0 if src completely fit within the remaining dst space. Pads the
|
||||
remaining dst with '\0'.
|
||||
|
||||
Note: dst_size must be > 0
|
||||
*/
|
||||
static inline int safe_strcat(char *dst, size_t dst_size, const char *src)
|
||||
{
|
||||
size_t init_len= strlen(dst);
|
||||
if (init_len >= dst_size - 1)
|
||||
return 1;
|
||||
return safe_strcpy(dst + init_len, dst_size - init_len, src);
|
||||
}
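To make the truncation contract of these helpers concrete, here is a minimal, self-contained usage sketch. It copies the two inline functions from the hunk above (comments trimmed) so it compiles on its own; the buffer size and example strings are arbitrary illustrations, not values used anywhere in the server.

#include <stdio.h>
#include <string.h>

/* Copied from the include/m_string.h hunk above so the sketch is standalone. */
static inline int safe_strcpy(char *dst, size_t dst_size, const char *src)
{
  memset(dst, '\0', dst_size);
  strncpy(dst, src, dst_size - 1);
  if (dst[dst_size - 2] != '\0' && src[dst_size - 1] != '\0')
    return 1;                          /* src was truncated */
  return 0;
}

static inline int safe_strcat(char *dst, size_t dst_size, const char *src)
{
  size_t init_len= strlen(dst);
  if (init_len >= dst_size - 1)
    return 1;
  return safe_strcpy(dst + init_len, dst_size - init_len, src);
}

int main(void)
{
  char cfg[16];

  /* Fits: "plugin" + ".ini" is 10 characters plus the terminating '\0'. */
  if (safe_strcpy(cfg, sizeof(cfg), "plugin") ||
      safe_strcat(cfg, sizeof(cfg), ".ini"))
    fprintf(stderr, "name too long\n");
  printf("%s\n", cfg);                 /* prints "plugin.ini" */

  /* Does not fit: the copy is truncated and the call reports it. */
  if (safe_strcpy(cfg, sizeof(cfg), "a_rather_long_plugin_name"))
    fprintf(stderr, "truncated to: %s\n", cfg);
  return 0;
}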
|
||||
|
||||
#ifdef __cplusplus
|
||||
static inline char *safe_str(char *str)
|
||||
{ return str ? str : const_cast<char*>(""); }
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
# [--let $rpl_skip_start_slave= 1]
|
||||
# [--let $rpl_debug= 1]
|
||||
# [--let $slave_timeout= NUMBER]
|
||||
# [--let $rpl_server_skip_log_bin= 1]
|
||||
# --source include/master-slave.inc
|
||||
#
|
||||
# Parameters:
|
||||
|
|
|
@ -73,6 +73,7 @@
|
|||
# before CHANGE MASTER and START SLAVE. RESET MASTER and RESET
|
||||
# SLAVE are suppressed if $rpl_skip_reset_master_and_slave is
|
||||
# set.
|
||||
# Also see $rpl_server_skip_log_bin.
|
||||
#
|
||||
# $rpl_skip_change_master
|
||||
# By default, this script issues CHANGE MASTER so that all slaves
|
||||
|
@ -94,6 +95,10 @@
|
|||
# Timeout used when waiting for the slave threads to start.
|
||||
# See include/wait_for_slave_param.inc
|
||||
#
|
||||
# $rpl_server_skip_log_bin
|
||||
# When $rpl_skip_reset_master_and_slave is not set,
|
||||
# RESET MASTER does not report ER_FLUSH_MASTER_BINLOG_CLOSED
|
||||
# on any server.
|
||||
#
|
||||
# ==== Side effects ====
|
||||
#
|
||||
|
@ -161,7 +166,16 @@ while ($_rpl_server)
|
|||
USE test;
|
||||
if (!$rpl_skip_reset_master_and_slave)
|
||||
{
|
||||
RESET MASTER;
|
||||
if (!$rpl_server_skip_log_bin)
|
||||
{
|
||||
--error 0
|
||||
RESET MASTER;
|
||||
}
|
||||
if ($rpl_server_skip_log_bin)
|
||||
{
|
||||
--error 0,ER_FLUSH_MASTER_BINLOG_CLOSED
|
||||
RESET MASTER;
|
||||
}
|
||||
SET GLOBAL gtid_slave_pos= "";
|
||||
RESET SLAVE;
|
||||
}
|
||||
|
|
|
@ -5703,6 +5703,178 @@ r
|
|||
3
|
||||
drop table t1,t2,t3,x;
|
||||
#
|
||||
# MDEV-30248: Embedded non-recursive CTE referring to base table 'x'
|
||||
# within a CTE with name 'x' used in a subquery from
|
||||
# select list of another CTE
|
||||
#
|
||||
CREATE TABLE x (a int) ENGINE=MyISAM;
|
||||
INSERT INTO x VALUES (3),(7),(1);
|
||||
CREATE TABLE t1 (b int) ENGINE=MYISAM;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
1
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
1
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
1
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
3
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
c
|
||||
3
|
||||
WITH x AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT x.c from x;
|
||||
c
|
||||
1
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
|
||||
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c from cte;
|
||||
c
|
||||
2
|
||||
DROP TABLE x;
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH x AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT x.c from x;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
|
||||
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c from cte;
|
||||
ERROR 42S02: Table 'test.x' doesn't exist
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
#
|
||||
|
|
|
@ -3874,6 +3874,129 @@ select * from cte;
|
|||
|
||||
drop table t1,t2,t3,x;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-30248: Embedded non-recursive CTE referring to base table 'x'
|
||||
--echo # within a CTE with name 'x' used in a subquery from
|
||||
--echo # select list of another CTE
|
||||
--echo #
|
||||
|
||||
CREATE TABLE x (a int) ENGINE=MyISAM;
|
||||
INSERT INTO x VALUES (3),(7),(1);
|
||||
CREATE TABLE t1 (b int) ENGINE=MYISAM;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
|
||||
let $q1=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q1;
|
||||
|
||||
let $q2=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q2;
|
||||
|
||||
let $q3=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q3;
|
||||
|
||||
|
||||
let $q4=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q4;
|
||||
|
||||
let $q5=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c FROM cte;
|
||||
eval $q5;
|
||||
|
||||
let $q6=
|
||||
WITH x AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
|
||||
SELECT b FROM x AS r
|
||||
) AS c
|
||||
)
|
||||
SELECT x.c from x;
|
||||
eval $q6;
|
||||
|
||||
let $q7=
|
||||
WITH cte AS
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
WITH x AS
|
||||
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
|
||||
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
|
||||
) AS c
|
||||
)
|
||||
SELECT cte.c from cte;
|
||||
eval $q7;
|
||||
|
||||
|
||||
DROP TABLE x;
|
||||
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q1;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q2;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q3;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q4;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q5;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q6;
|
||||
--ERROR ER_NO_SUCH_TABLE
|
||||
eval $q7;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
--echo #
|
||||
|
|
|
@ -11383,6 +11383,181 @@ a
|
|||
# End of 10.3 tests
|
||||
#
|
||||
#
|
||||
# Start of 10.4 tests
|
||||
#
|
||||
#
|
||||
# MDEV-27653 long uniques don't work with unicode collations
|
||||
#
|
||||
SET NAMES utf8mb3;
|
||||
CREATE TABLE t1 (
|
||||
a CHAR(30) COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` char(30) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
ERROR 23000: Duplicate entry 'ä' for key 'a'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
a
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
a CHAR(30) COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a(10)) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` char(30) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`(10)) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
ERROR 23000: Duplicate entry 'ä' for key 'a'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
a
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
a VARCHAR(30) COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` varchar(30) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
ERROR 23000: Duplicate entry 'ä' for key 'a'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
a
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
a VARCHAR(30) COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a(10)) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` varchar(30) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`(10)) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
ERROR 23000: Duplicate entry 'ä' for key 'a'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
a
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (a TEXT COLLATE utf8mb3_general_ci UNIQUE);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` text CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
ERROR 23000: Duplicate entry 'ä' for key 'a'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
a
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
a LONGTEXT COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a(10)) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` longtext CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`(10)) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
ERROR 23000: Duplicate entry 'ä' for key 'a'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
a
|
||||
DROP TABLE t1;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` text CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
a OCTET_LENGTH(a)
|
||||
a 1
|
||||
ä 2
|
||||
CHECK TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
|
||||
INSERT INTO t1 VALUES ('A');
|
||||
ERROR 23000: Duplicate entry 'A' for key 'a'
|
||||
INSERT INTO t1 VALUES ('Ä');
|
||||
ERROR 23000: Duplicate entry 'Ä' for key 'a'
|
||||
INSERT INTO t1 VALUES ('Ấ');
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
a OCTET_LENGTH(a)
|
||||
a 1
|
||||
ä 2
|
||||
Ấ 3
|
||||
CHECK TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
|
||||
ALTER TABLE t1 FORCE;
|
||||
ERROR 23000: Duplicate entry 'ä' for key 'a'
|
||||
DELETE FROM t1 WHERE OCTET_LENGTH(a)>1;
|
||||
ALTER TABLE t1 FORCE;
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
ERROR 23000: Duplicate entry 'ä' for key 'a'
|
||||
DROP TABLE t1;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` text CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
a OCTET_LENGTH(a)
|
||||
a 1
|
||||
ä 2
|
||||
ALTER IGNORE TABLE t1 FORCE;
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
a OCTET_LENGTH(a)
|
||||
a 1
|
||||
DROP TABLE t1;
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` text CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL,
|
||||
UNIQUE KEY `a` (`a`) USING HASH
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
a OCTET_LENGTH(a)
|
||||
a 1
|
||||
ä 2
|
||||
REPAIR TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 repair Warning Number of rows changed from 2 to 1
|
||||
test.t1 repair status OK
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
a OCTET_LENGTH(a)
|
||||
a 1
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
#
|
||||
# Start of 10.5 tests
|
||||
#
|
||||
#
|
||||
|
|
|
@ -2313,6 +2313,164 @@ VALUES (_latin1 0xDF) UNION VALUES(_utf8'a' COLLATE utf8_bin);
|
|||
--echo #
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # Start of 10.4 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-27653 long uniques don't work with unicode collations
|
||||
--echo #
|
||||
|
||||
SET NAMES utf8mb3;
|
||||
|
||||
# CHAR
|
||||
|
||||
CREATE TABLE t1 (
|
||||
a CHAR(30) COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
CREATE TABLE t1 (
|
||||
a CHAR(30) COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a(10)) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
# VARCHAR
|
||||
|
||||
CREATE TABLE t1 (
|
||||
a VARCHAR(30) COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
CREATE TABLE t1 (
|
||||
a VARCHAR(30) COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a(10)) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
# TEXT
|
||||
|
||||
CREATE TABLE t1 (a TEXT COLLATE utf8mb3_general_ci UNIQUE);
|
||||
SHOW CREATE TABLE t1;
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
CREATE TABLE t1 (
|
||||
a LONGTEXT COLLATE utf8mb3_general_ci,
|
||||
UNIQUE KEY(a(10)) USING HASH
|
||||
);
|
||||
SHOW CREATE TABLE t1;
|
||||
INSERT INTO t1 VALUES ('a');
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
|
||||
# Testing upgrade:
|
||||
# Prior to MDEV-27653, the UNIQUE HASH function erroneously
|
||||
# took into account string octet length.
|
||||
# Old tables should still open and work, but with wrong results.
|
||||
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.frm $MYSQLD_DATADIR/test/t1.frm;
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYD $MYSQLD_DATADIR/test/t1.MYD;
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYI $MYSQLD_DATADIR/test/t1.MYI;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
CHECK TABLE t1;
|
||||
|
||||
# There is already a one-byte value 'a' in the table
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('A');
|
||||
|
||||
# There is already a two-byte value 'ä' in the table
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('Ä');
|
||||
|
||||
# There were no three-byte values in the table so far.
|
||||
# The below value violates UNIQUE, but it gets inserted.
|
||||
# This is wrong but expected for a pre-MDEV-27653 table.
|
||||
INSERT INTO t1 VALUES ('Ấ');
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
CHECK TABLE t1;
|
||||
|
||||
# ALTER FORCE fails: it tries to rebuild the table
|
||||
# with a correct UNIQUE HASH function, but there are duplicates!
|
||||
--error ER_DUP_ENTRY
|
||||
ALTER TABLE t1 FORCE;
|
||||
|
||||
# Let's remove all duplicate values, so only the one-byte 'a' stays.
|
||||
# ALTER..FORCE should work after that.
|
||||
DELETE FROM t1 WHERE OCTET_LENGTH(a)>1;
|
||||
ALTER TABLE t1 FORCE;
|
||||
|
||||
# Make sure that 'a' and 'ä' cannot co-exist any more,
|
||||
# because the table was recreated with a correct UNIQUE HASH function.
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 VALUES ('ä');
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Testing an old table with ALTER IGNORE.
|
||||
# The table is expected to rebuild with a new hash function,
|
||||
# duplicates go away.
|
||||
#
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.frm $MYSQLD_DATADIR/test/t1.frm;
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYD $MYSQLD_DATADIR/test/t1.MYD;
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYI $MYSQLD_DATADIR/test/t1.MYI;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
ALTER IGNORE TABLE t1 FORCE;
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Testing an old table with REPAIR.
|
||||
# The table is expected to rebuild with a new hash function,
|
||||
# duplicates go away.
|
||||
#
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.frm $MYSQLD_DATADIR/test/t1.frm;
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYD $MYSQLD_DATADIR/test/t1.MYD;
|
||||
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYI $MYSQLD_DATADIR/test/t1.MYI;
|
||||
SHOW CREATE TABLE t1;
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
REPAIR TABLE t1;
|
||||
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # Start of 10.5 tests
|
||||
--echo #
|
||||
|
|
|
@ -20611,6 +20611,69 @@ DROP TABLE transaction_items;
|
|||
DROP TABLE transactions;
|
||||
DROP TABLE charges;
|
||||
DROP TABLE ledgers;
|
||||
#
|
||||
# MDEV-30081: Splitting from a constant mergeable derived table
|
||||
# used in inner part of an outer join.
|
||||
#
|
||||
CREATE TABLE t1 ( id int PRIMARY KEY ) ENGINE=MyISAM;
|
||||
INSERT INTO t1 VALUES (3),(4),(7);
|
||||
CREATE TABLE t2 (
|
||||
id int, id1 int, wid int, PRIMARY KEY (id), KEY (id1), KEY (wid)
|
||||
) ENGINE=MyISAM;
|
||||
INSERT INTO t2 VALUES (4,4,6),(7,7,7);
|
||||
CREATE TABLE t3 (
|
||||
wid int, wtid int, otid int, oid int,
|
||||
PRIMARY KEY (wid), KEY (wtid), KEY (otid), KEY (oid)
|
||||
) ENGINE=MyISAM;
|
||||
INSERT INTO t3 VALUES (6,30,6,6),(7,17,7,7);
|
||||
CREATE TABLE t4 ( id int, a int, PRIMARY KEY (id), KEY (a) ) ENGINE=MyISAM;
|
||||
INSERT INTO t4 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
|
||||
CREATE TABLE t5 (
|
||||
id int, id1 int, PRIMARY KEY (id), KEY id1 (id1)
|
||||
) ENGINE=MyISAM ;
|
||||
INSERT INTO t5 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
|
||||
ANALYZE TABLE t1,t2,t3,t4,t5;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
test.t2 analyze status Engine-independent statistics collected
|
||||
test.t2 analyze status OK
|
||||
test.t3 analyze status Engine-independent statistics collected
|
||||
test.t3 analyze status OK
|
||||
test.t4 analyze status Engine-independent statistics collected
|
||||
test.t4 analyze status OK
|
||||
test.t5 analyze status Engine-independent statistics collected
|
||||
test.t5 analyze status OK
|
||||
CREATE VIEW v1 AS (SELECT id1 FROM t5 GROUP BY id1);
|
||||
SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
|
||||
FROM
|
||||
t1, t2, t3
|
||||
LEFT JOIN
|
||||
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
|
||||
ON t3.oid = dt.id AND t3.otid = 14
|
||||
LEFT JOIN v1
|
||||
ON (v1.id1 = dt.a)
|
||||
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
|
||||
wid wtid otid oid t1_id t2_id id a id1
|
||||
7 17 7 7 7 7 NULL NULL NULL
|
||||
EXPLAIN SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
|
||||
FROM
|
||||
t1, t2, t3
|
||||
LEFT JOIN
|
||||
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
|
||||
ON t3.oid = dt.id AND t3.otid = 14
|
||||
LEFT JOIN v1
|
||||
ON (v1.id1 = dt.a)
|
||||
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY t3 const PRIMARY,oid PRIMARY 4 const 1
|
||||
1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index
|
||||
1 PRIMARY t2 const PRIMARY PRIMARY 4 const 1 Using index
|
||||
1 PRIMARY t4 const PRIMARY,a NULL NULL NULL 1 Impossible ON condition
|
||||
1 PRIMARY <derived3> ref key0 key0 5 const 0 Using where
|
||||
3 LATERAL DERIVED t5 ref id1 id1 5 const 0 Using index
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1,t2,t3,t4,t5;
|
||||
# End of 10.3 tests
|
||||
#
|
||||
# MDEV-18679: materialized view with SELECT S containing materialized
|
||||
|
|
|
@ -3871,6 +3871,55 @@ DROP TABLE transactions;
|
|||
DROP TABLE charges;
|
||||
DROP TABLE ledgers;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-30081: Splitting from a constant mergeable derived table
|
||||
--echo # used in inner part of an outer join.
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 ( id int PRIMARY KEY ) ENGINE=MyISAM;
|
||||
INSERT INTO t1 VALUES (3),(4),(7);
|
||||
|
||||
CREATE TABLE t2 (
|
||||
id int, id1 int, wid int, PRIMARY KEY (id), KEY (id1), KEY (wid)
|
||||
) ENGINE=MyISAM;
|
||||
INSERT INTO t2 VALUES (4,4,6),(7,7,7);
|
||||
|
||||
CREATE TABLE t3 (
|
||||
wid int, wtid int, otid int, oid int,
|
||||
PRIMARY KEY (wid), KEY (wtid), KEY (otid), KEY (oid)
|
||||
) ENGINE=MyISAM;
|
||||
INSERT INTO t3 VALUES (6,30,6,6),(7,17,7,7);
|
||||
|
||||
CREATE TABLE t4 ( id int, a int, PRIMARY KEY (id), KEY (a) ) ENGINE=MyISAM;
|
||||
INSERT INTO t4 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
|
||||
|
||||
CREATE TABLE t5 (
|
||||
id int, id1 int, PRIMARY KEY (id), KEY id1 (id1)
|
||||
) ENGINE=MyISAM ;
|
||||
INSERT INTO t5 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
|
||||
|
||||
ANALYZE TABLE t1,t2,t3,t4,t5;
|
||||
|
||||
CREATE VIEW v1 AS (SELECT id1 FROM t5 GROUP BY id1);
|
||||
|
||||
let $q=
|
||||
SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
|
||||
FROM
|
||||
t1, t2, t3
|
||||
LEFT JOIN
|
||||
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
|
||||
ON t3.oid = dt.id AND t3.otid = 14
|
||||
LEFT JOIN v1
|
||||
ON (v1.id1 = dt.a)
|
||||
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
|
||||
|
||||
eval $q;
|
||||
eval EXPLAIN $q;
|
||||
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1,t2,t3,t4,t5;
|
||||
|
||||
--echo # End of 10.3 tests
|
||||
|
||||
--echo #
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
#
|
||||
# MDEV-18707 Server crash in my_hash_sort_bin, ASAN heap-use-after-free in Field::is_null, server hang, corrupted double-linked list
|
||||
#
|
||||
create table t1 (a int, b int, c int, d int, e int);
|
||||
insert into t1 () values
|
||||
(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
|
||||
|
@ -11,6 +14,9 @@ load data infile 'load.data' into table tmp;
|
|||
delete from tmp;
|
||||
drop table t1;
|
||||
drop table tmp;
|
||||
#
|
||||
# MDEV-18712 InnoDB indexes are inconsistent with what defined in .frm for table after rebuilding table with index on blob
|
||||
#
|
||||
create table t1 (b blob) engine=innodb;
|
||||
alter table t1 add unique (b);
|
||||
alter table t1 force;
|
||||
|
@ -21,12 +27,18 @@ t1 CREATE TABLE `t1` (
|
|||
UNIQUE KEY `b` (`b`) USING HASH
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18713 Assertion `strcmp(share->unique_file_name,filename) || share->last_version' failed in test_if_reopen upon REPLACE into table with key on blob
|
||||
#
|
||||
create table t1 (pk int, b blob, primary key(pk), unique(b)) engine=myisam;
|
||||
insert into t1 values (1,'foo');
|
||||
replace into t1 (pk) values (1);
|
||||
alter table t1 force;
|
||||
replace into t1 (pk) values (1);
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18722 Assertion `templ->mysql_null_bit_mask' failed in row_sel_store_mysql_rec upon modifying indexed column into blob
|
||||
#
|
||||
create table t1 (t time, unique(t)) engine=innodb;
|
||||
insert into t1 values (null),(null);
|
||||
alter ignore table t1 modify t text not null default '';
|
||||
|
@ -34,6 +46,9 @@ Warnings:
|
|||
Warning 1265 Data truncated for column 't' at row 1
|
||||
Warning 1265 Data truncated for column 't' at row 2
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18720 Assertion `inited==NONE' failed in ha_index_init upon update on versioned table with key on blob
|
||||
#
|
||||
create table t1 ( pk int, f text, primary key (pk), unique(f)) with system versioning;
|
||||
insert into t1 values (1,'foo');
|
||||
update t1 set f = 'bar';
|
||||
|
@ -50,20 +65,32 @@ pk f row_end > DATE'2030-01-01'
|
|||
1 foo 0
|
||||
1 bar 0
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18747 InnoDB: Failing assertion: table->get_ref_count() == 0 upon dropping temporary table with unique blob
|
||||
#
|
||||
create temporary table t1 (f blob, unique(f)) engine=innodb;
|
||||
insert into t1 values (1);
|
||||
replace into t1 values (1);
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18748 REPLACE doesn't work with unique blobs on MyISAM table
|
||||
#
|
||||
create table t (b blob, unique(b)) engine=myisam;
|
||||
insert into t values ('foo');
|
||||
replace into t values ('foo');
|
||||
drop table t;
|
||||
#
|
||||
# MDEV-18790 Server crash in fields_in_hash_keyinfo after unsuccessful attempt to drop BLOB with long index
|
||||
#
|
||||
CREATE TABLE t1 (f INT, x BLOB, UNIQUE (x));
|
||||
INSERT INTO t1 VALUES (1,'foo');
|
||||
ALTER TABLE t1 DROP x, ALGORITHM=INPLACE;
|
||||
ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY
|
||||
UPDATE t1 SET x = 'bar';
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18799 Long unique does not work after failed alter table
|
||||
#
|
||||
create table t1(a blob unique , b blob);
|
||||
insert into t1 values(1,1),(2,1);
|
||||
alter table t1 add unique(b);
|
||||
|
@ -86,16 +113,26 @@ Ignored NO
|
|||
insert into t1 values(1,1);
|
||||
ERROR 23000: Duplicate entry '1' for key 'a'
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18792 ASAN unknown-crash in _mi_pack_key upon UPDATE after failed ALTER on a table with long BLOB key
|
||||
#
|
||||
CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=MyISAM;
|
||||
ALTER TABLE t1 DROP x;
|
||||
ERROR 42000: Can't DROP COLUMN `x`; check that it exists
|
||||
UPDATE t1 SET b = 0 WHERE a = 'foo';
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18793 Assertion `0' failed in row_sel_convert_mysql_key_to_innobase, ASAN unknown-crash in
|
||||
# row_mysql_store_col_in_innobase_format, warning " InnoDB: Using a partial-field key prefix in search"
|
||||
#
|
||||
CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=InnoDB;
|
||||
ALTER TABLE t1 DROP x;
|
||||
ERROR 42000: Can't DROP COLUMN `x`; check that it exists
|
||||
UPDATE t1 SET b = 0 WHERE a = 'foo';
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18795 InnoDB: Failing assertion: field->prefix_len > 0 upon DML on table with BLOB index
|
||||
#
|
||||
CREATE TEMPORARY TABLE t1 (f BLOB, UNIQUE(f)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
|
||||
ALTER TABLE t1 ADD KEY (f);
|
||||
ERROR HY000: Index column size too large. The maximum column size is 767 bytes
|
||||
|
@ -103,17 +140,29 @@ TRUNCATE TABLE t1;
|
|||
SELECT * FROM t1 WHERE f LIKE 'foo';
|
||||
f
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18798 InnoDB: No matching column for `DB_ROW_HASH_1`and server crash in
|
||||
# ha_innobase::commit_inplace_alter_table upon ALTER on table with UNIQUE key
|
||||
#
|
||||
CREATE TABLE t1 (a INT, UNIQUE ind USING HASH (a)) ENGINE=InnoDB;
|
||||
ALTER TABLE t1 CHANGE COLUMN IF EXISTS b a INT;
|
||||
Warnings:
|
||||
Note 1054 Unknown column 'b' in 't1'
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18801 InnoDB: Failing assertion: field->col->mtype == type or ASAN heap-buffer-overflow
|
||||
# in row_sel_convert_mysql_key_to_innobase upon SELECT on table with long index
|
||||
#
|
||||
CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
|
||||
ALTER TABLE t1 DROP x;
|
||||
ERROR 42000: Can't DROP COLUMN `x`; check that it exists
|
||||
SELECT * FROM t1 WHERE f LIKE 'foo';
|
||||
f
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18800 Server crash in instant_alter_column_possible or
|
||||
# Assertion `!pk->has_virtual()' failed in instant_alter_column_possible upon adding key
|
||||
#
|
||||
CREATE TABLE t1 (pk INT, PRIMARY KEY USING HASH (pk)) ENGINE=InnoDB;
|
||||
show keys from t1;;
|
||||
Table t1
|
||||
|
@ -132,6 +181,9 @@ Index_comment
|
|||
Ignored NO
|
||||
ALTER TABLE t1 ADD INDEX (pk);
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18922 Alter on long unique varchar column makes result null
|
||||
#
|
||||
CREATE TABLE t1 (b int, a varchar(4000));
|
||||
INSERT INTO t1 VALUES (1, 2),(2,3),(3,4);
|
||||
ALTER TABLE t1 ADD UNIQUE INDEX (a);
|
||||
|
@ -146,6 +198,10 @@ a
|
|||
3
|
||||
4
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18809 Server crash in fields_in_hash_keyinfo or Assertion `key_info->key_part->field->flags
|
||||
# & (1<< 30)' failed in setup_keyinfo_hash
|
||||
#
|
||||
CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
|
||||
ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT;
|
||||
ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: ADD INDEX. Try ALGORITHM=NOCOPY
|
||||
|
@ -161,6 +217,9 @@ insert into t1 values(1,1);
|
|||
ERROR 23000: Duplicate entry '1-1' for key 'a'
|
||||
alter table t1 add column c int;
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18889 Long unique on virtual fields crashes server
|
||||
#
|
||||
create table t1(a blob , b blob as (a) unique);
|
||||
insert into t1 values(1, default);
|
||||
insert into t1 values(1, default);
|
||||
|
@ -174,6 +233,9 @@ insert into t1(a,b) values(2,2);
|
|||
insert into t1(a,b) values(2,3);
|
||||
insert into t1(a,b) values(3,2);
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18888 Server crashes in Item_field::register_field_in_read_map upon MODIFY COLUMN
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
a CHAR(128),
|
||||
b CHAR(128) AS (a),
|
||||
|
@ -189,6 +251,9 @@ c varchar(5000),
|
|||
UNIQUE(c,b(64))
|
||||
) ENGINE=InnoDB;
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18967 Load data in system version with long unique does not work
|
||||
#
|
||||
CREATE TABLE t1 (data VARCHAR(4), unique(data) using hash) with system versioning;
|
||||
INSERT INTO t1 VALUES ('A');
|
||||
SELECT * INTO OUTFILE 'load.data' from t1;
|
||||
|
@ -198,6 +263,9 @@ select * from t1;
|
|||
data
|
||||
A
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18901 Wrong results after ADD UNIQUE INDEX(blob_column)
|
||||
#
|
||||
CREATE TABLE t1 (data VARCHAR(7961)) ENGINE=InnoDB;
|
||||
INSERT INTO t1 VALUES ('f'), ('o'), ('o');
|
||||
SELECT * INTO OUTFILE 'load.data' from t1;
|
||||
|
@ -216,12 +284,16 @@ SELECT * FROM t1;
|
|||
data
|
||||
f
|
||||
o
|
||||
# This should be equivalent to the REPLACE above
|
||||
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
data
|
||||
f
|
||||
o
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18953 Hash index on partial char field not working
|
||||
#
|
||||
create table t1 (
|
||||
c char(10) character set utf8mb4,
|
||||
unique key a using hash (c(1))
|
||||
|
@ -238,10 +310,16 @@ ERROR 23000: Duplicate entry '
|
|||
insert into t1 values ('ббб');
|
||||
ERROR 23000: Duplicate entry 'Ð' for key 'a'
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18904 Assertion `m_part_spec.start_part >= m_part_spec.end_part' failed in ha_partition::index_read_idx_map
|
||||
#
|
||||
CREATE TABLE t1 (a INT, UNIQUE USING HASH (a)) PARTITION BY HASH (a) PARTITIONS 2;
|
||||
INSERT INTO t1 VALUES (2);
|
||||
REPLACE INTO t1 VALUES (2);
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-18820 Assertion `lock_table_has(trx, index->table, LOCK_IX)' failed in lock_rec_insert_check_and_lock upon INSERT into table with blob key'
|
||||
#
|
||||
set innodb_lock_wait_timeout= 10;
|
||||
CREATE TABLE t1 (
|
||||
id int primary key,
|
||||
|
@ -268,11 +346,20 @@ ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
|||
disconnect con1;
|
||||
connection default;
|
||||
DROP TABLE t1, t2;
|
||||
#
|
||||
# MDEV-18791 Wrong error upon creating Aria table with long index on BLOB
|
||||
#
|
||||
CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
|
||||
ERROR 42000: Specified key was too long; max key length is 2300 bytes
|
||||
#
|
||||
# MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
|
||||
#
|
||||
create table t1(a int, unique(a) using hash);
|
||||
#BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB
|
||||
#
|
||||
SET binlog_row_image= NOBLOB;
|
||||
CREATE TABLE t1 (pk INT PRIMARY KEY, a text ,UNIQUE(a) using hash);
|
||||
INSERT INTO t1 VALUES (1,'foo');
|
||||
|
@ -280,6 +367,9 @@ create table t2(id int primary key, a blob, b varchar(20) as (LEFT(a,2)));
|
|||
INSERT INTO t2 VALUES (1, 'foo', default);
|
||||
DROP TABLE t1, t2;
|
||||
SET binlog_row_image= FULL;
|
||||
#
|
||||
# MDEV-22719 Long unique keys are not created when individual key_part->length < max_key_length but SUM(key_parts->length) > max_key_length
|
||||
#
|
||||
CREATE TABLE t1 (a int, b VARCHAR(1000), UNIQUE (a,b)) ENGINE=MyISAM;
|
||||
show index from t1;
|
||||
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Ignored
|
||||
|
@ -317,6 +407,9 @@ ERROR 23000: Duplicate entry '1' for key 'v2'
|
|||
update t1,t2 set v1 = v2 , v5 = 0;
|
||||
ERROR 23000: Duplicate entry '-128' for key 'v1'
|
||||
drop table t1, t2;
|
||||
#
|
||||
# MDEV-23264 Unique blobs allow duplicate values upon UPDATE
|
||||
#
|
||||
CREATE TABLE t1 (f TEXT UNIQUE);
|
||||
INSERT INTO t1 VALUES (NULL),(NULL);
|
||||
UPDATE t1 SET f = '';
|
||||
|
@ -346,6 +439,18 @@ partition n0 values less than (10),
|
|||
partition n1 values less than (50));
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-29199 Unique hash key is ignored upon INSERT ... SELECT into non-empty MyISAM table
|
||||
#
|
||||
create table t1 (a int, b text, unique(b)) engine=MyISAM;
|
||||
insert into t1 values (0,'aa');
|
||||
insert into t1 (a,b) select 1,'xxx' from seq_1_to_5;
|
||||
ERROR 23000: Duplicate entry 'xxx' for key 'b'
|
||||
select * from t1;
|
||||
a b
|
||||
0 aa
|
||||
1 xxx
|
||||
drop table t1;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
#
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
--source include/have_innodb.inc
|
||||
--source include/have_partition.inc
|
||||
--source include/have_sequence.inc
|
||||
|
||||
#
|
||||
# MDEV-18707 Server crash in my_hash_sort_bin, ASAN heap-use-after-free in Field::is_null, server hang, corrupted double-linked list
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18707 Server crash in my_hash_sort_bin, ASAN heap-use-after-free in Field::is_null, server hang, corrupted double-linked list
|
||||
--echo #
|
||||
create table t1 (a int, b int, c int, d int, e int);
|
||||
insert into t1 () values
|
||||
(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
|
||||
|
@ -20,18 +21,18 @@ drop table t1;
|
|||
--remove_file $datadir/test/load.data
|
||||
drop table tmp;
|
||||
|
||||
#
|
||||
# MDEV-18712 InnoDB indexes are inconsistent with what defined in .frm for table after rebuilding table with index on blob
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18712 InnoDB indexes are inconsistent with what defined in .frm for table after rebuilding table with index on blob
|
||||
--echo #
|
||||
create table t1 (b blob) engine=innodb;
|
||||
alter table t1 add unique (b);
|
||||
alter table t1 force;
|
||||
show create table t1;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18713 Assertion `strcmp(share->unique_file_name,filename) || share->last_version' failed in test_if_reopen upon REPLACE into table with key on blob
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18713 Assertion `strcmp(share->unique_file_name,filename) || share->last_version' failed in test_if_reopen upon REPLACE into table with key on blob
|
||||
--echo #
|
||||
create table t1 (pk int, b blob, primary key(pk), unique(b)) engine=myisam;
|
||||
insert into t1 values (1,'foo');
|
||||
replace into t1 (pk) values (1);
|
||||
|
@ -39,17 +40,17 @@ alter table t1 force;
|
|||
replace into t1 (pk) values (1);
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18722 Assertion `templ->mysql_null_bit_mask' failed in row_sel_store_mysql_rec upon modifying indexed column into blob
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18722 Assertion `templ->mysql_null_bit_mask' failed in row_sel_store_mysql_rec upon modifying indexed column into blob
|
||||
--echo #
|
||||
create table t1 (t time, unique(t)) engine=innodb;
|
||||
insert into t1 values (null),(null);
|
||||
alter ignore table t1 modify t text not null default '';
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18720 Assertion `inited==NONE' failed in ha_index_init upon update on versioned table with key on blob
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18720 Assertion `inited==NONE' failed in ha_index_init upon update on versioned table with key on blob
|
||||
--echo #
|
||||
create table t1 ( pk int, f text, primary key (pk), unique(f)) with system versioning;
|
||||
insert into t1 values (1,'foo');
|
||||
update t1 set f = 'bar';
|
||||
|
@ -59,25 +60,25 @@ select * from t1;
|
|||
select pk, f, row_end > DATE'2030-01-01' from t1 for system_time all;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18747 InnoDB: Failing assertion: table->get_ref_count() == 0 upon dropping temporary table with unique blob
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18747 InnoDB: Failing assertion: table->get_ref_count() == 0 upon dropping temporary table with unique blob
|
||||
--echo #
|
||||
create temporary table t1 (f blob, unique(f)) engine=innodb;
|
||||
insert into t1 values (1);
|
||||
replace into t1 values (1);
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18748 REPLACE doesn't work with unique blobs on MyISAM table
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18748 REPLACE doesn't work with unique blobs on MyISAM table
|
||||
--echo #
|
||||
create table t (b blob, unique(b)) engine=myisam;
|
||||
insert into t values ('foo');
|
||||
replace into t values ('foo');
|
||||
drop table t;
|
||||
|
||||
#
|
||||
# MDEV-18790 Server crash in fields_in_hash_keyinfo after unsuccessful attempt to drop BLOB with long index
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18790 Server crash in fields_in_hash_keyinfo after unsuccessful attempt to drop BLOB with long index
|
||||
--echo #
|
||||
CREATE TABLE t1 (f INT, x BLOB, UNIQUE (x));
|
||||
INSERT INTO t1 VALUES (1,'foo');
|
||||
--error ER_ALTER_OPERATION_NOT_SUPPORTED
|
||||
|
@ -85,9 +86,9 @@ ALTER TABLE t1 DROP x, ALGORITHM=INPLACE;
|
|||
UPDATE t1 SET x = 'bar';
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18799 Long unique does not work after failed alter table
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18799 Long unique does not work after failed alter table
|
||||
--echo #
|
||||
create table t1(a blob unique , b blob);
|
||||
insert into t1 values(1,1),(2,1);
|
||||
--error ER_DUP_ENTRY
|
||||
|
@ -97,28 +98,28 @@ alter table t1 add unique(b);
|
|||
insert into t1 values(1,1);
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18792 ASAN unknown-crash in _mi_pack_key upon UPDATE after failed ALTER on a table with long BLOB key
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18792 ASAN unknown-crash in _mi_pack_key upon UPDATE after failed ALTER on a table with long BLOB key
|
||||
--echo #
|
||||
CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=MyISAM;
|
||||
--error ER_CANT_DROP_FIELD_OR_KEY
|
||||
ALTER TABLE t1 DROP x;
|
||||
UPDATE t1 SET b = 0 WHERE a = 'foo';
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18793 Assertion `0' failed in row_sel_convert_mysql_key_to_innobase, ASAN unknown-crash in
|
||||
# row_mysql_store_col_in_innobase_format, warning " InnoDB: Using a partial-field key prefix in search"
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18793 Assertion `0' failed in row_sel_convert_mysql_key_to_innobase, ASAN unknown-crash in
|
||||
--echo # row_mysql_store_col_in_innobase_format, warning " InnoDB: Using a partial-field key prefix in search"
|
||||
--echo #
|
||||
CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=InnoDB;
|
||||
--error ER_CANT_DROP_FIELD_OR_KEY
|
||||
ALTER TABLE t1 DROP x;
|
||||
UPDATE t1 SET b = 0 WHERE a = 'foo';
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18795 InnoDB: Failing assertion: field->prefix_len > 0 upon DML on table with BLOB index
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18795 InnoDB: Failing assertion: field->prefix_len > 0 upon DML on table with BLOB index
|
||||
--echo #
|
||||
CREATE TEMPORARY TABLE t1 (f BLOB, UNIQUE(f)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
|
||||
--error ER_INDEX_COLUMN_TOO_LONG
|
||||
ALTER TABLE t1 ADD KEY (f);
|
||||
|
@ -126,36 +127,36 @@ TRUNCATE TABLE t1;
|
|||
SELECT * FROM t1 WHERE f LIKE 'foo';
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18798 InnoDB: No matching column for `DB_ROW_HASH_1`and server crash in
|
||||
# ha_innobase::commit_inplace_alter_table upon ALTER on table with UNIQUE key
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18798 InnoDB: No matching column for `DB_ROW_HASH_1`and server crash in
|
||||
--echo # ha_innobase::commit_inplace_alter_table upon ALTER on table with UNIQUE key
|
||||
--echo #
|
||||
CREATE TABLE t1 (a INT, UNIQUE ind USING HASH (a)) ENGINE=InnoDB;
|
||||
ALTER TABLE t1 CHANGE COLUMN IF EXISTS b a INT;
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18801 InnoDB: Failing assertion: field->col->mtype == type or ASAN heap-buffer-overflow
|
||||
# in row_sel_convert_mysql_key_to_innobase upon SELECT on table with long index
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18801 InnoDB: Failing assertion: field->col->mtype == type or ASAN heap-buffer-overflow
|
||||
--echo # in row_sel_convert_mysql_key_to_innobase upon SELECT on table with long index
|
||||
--echo #
|
||||
CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
|
||||
--error ER_CANT_DROP_FIELD_OR_KEY
|
||||
ALTER TABLE t1 DROP x;
|
||||
SELECT * FROM t1 WHERE f LIKE 'foo';
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18800 Server crash in instant_alter_column_possible or
|
||||
# Assertion `!pk->has_virtual()' failed in instant_alter_column_possible upon adding key
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18800 Server crash in instant_alter_column_possible or
|
||||
--echo # Assertion `!pk->has_virtual()' failed in instant_alter_column_possible upon adding key
|
||||
--echo #
|
||||
CREATE TABLE t1 (pk INT, PRIMARY KEY USING HASH (pk)) ENGINE=InnoDB;
|
||||
--query_vertical show keys from t1;
|
||||
ALTER TABLE t1 ADD INDEX (pk);
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18922 Alter on long unique varchar column makes result null
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18922 Alter on long unique varchar column makes result null
|
||||
--echo #
|
||||
CREATE TABLE t1 (b int, a varchar(4000));
|
||||
INSERT INTO t1 VALUES (1, 2),(2,3),(3,4);
|
||||
ALTER TABLE t1 ADD UNIQUE INDEX (a);
|
||||
|
@ -163,10 +164,10 @@ SELECT * FROM t1;
|
|||
SELECT a FROM t1;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18809 Server crash in fields_in_hash_keyinfo or Assertion `key_info->key_part->field->flags
|
||||
# & (1<< 30)' failed in setup_keyinfo_hash
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18809 Server crash in fields_in_hash_keyinfo or Assertion `key_info->key_part->field->flags
|
||||
--echo # & (1<< 30)' failed in setup_keyinfo_hash
|
||||
--echo #
|
||||
CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
|
||||
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
|
||||
ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT;
|
||||
|
@ -181,9 +182,9 @@ insert into t1 values(1,1);
|
|||
alter table t1 add column c int;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18889 Long unique on virtual fields crashes server
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18889 Long unique on virtual fields crashes server
|
||||
--echo #
|
||||
create table t1(a blob , b blob as (a) unique);
|
||||
insert into t1 values(1, default);
|
||||
--error ER_DUP_ENTRY
|
||||
|
@ -199,9 +200,9 @@ insert into t1(a,b) values(2,3);
|
|||
insert into t1(a,b) values(3,2);
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18888 Server crashes in Item_field::register_field_in_read_map upon MODIFY COLUMN
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18888 Server crashes in Item_field::register_field_in_read_map upon MODIFY COLUMN
|
||||
--echo #
|
||||
CREATE TABLE t1 (
|
||||
a CHAR(128),
|
||||
b CHAR(128) AS (a),
|
||||
|
@ -218,9 +219,9 @@ CREATE TABLE t1 (
|
|||
) ENGINE=InnoDB;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18967 Load data in system version with long unique does not work
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18967 Load data in system version with long unique does not work
|
||||
--echo #
|
||||
CREATE TABLE t1 (data VARCHAR(4), unique(data) using hash) with system versioning;
|
||||
INSERT INTO t1 VALUES ('A');
|
||||
SELECT * INTO OUTFILE 'load.data' from t1;
|
||||
|
@ -231,9 +232,9 @@ DROP TABLE t1;
|
|||
--let $datadir= `select @@datadir`
|
||||
--remove_file $datadir/test/load.data
|
||||
|
||||
#
|
||||
# MDEV-18901 Wrong results after ADD UNIQUE INDEX(blob_column)
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18901 Wrong results after ADD UNIQUE INDEX(blob_column)
|
||||
--echo #
|
||||
CREATE TABLE t1 (data VARCHAR(7961)) ENGINE=InnoDB;
|
||||
|
||||
INSERT INTO t1 VALUES ('f'), ('o'), ('o');
|
||||
|
@ -245,16 +246,16 @@ ALTER TABLE t1 ADD SYSTEM VERSIONING ;
|
|||
SELECT * FROM t1;
|
||||
REPLACE INTO t1 VALUES ('f'), ('o'), ('o');
|
||||
SELECT * FROM t1;
|
||||
# This should be equivalent to the REPLACE above
|
||||
--echo # This should be equivalent to the REPLACE above
|
||||
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1;
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
--let $datadir= `select @@datadir`
|
||||
--remove_file $datadir/test/load.data
|
||||
|
||||
#
|
||||
# MDEV-18953 Hash index on partial char field not working
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18953 Hash index on partial char field not working
|
||||
--echo #
|
||||
create table t1 (
|
||||
c char(10) character set utf8mb4,
|
||||
unique key a using hash (c(1))
|
||||
|
@ -267,17 +268,17 @@ insert into t1 values ('бб');
|
|||
insert into t1 values ('ббб');
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-18904 Assertion `m_part_spec.start_part >= m_part_spec.end_part' failed in ha_partition::index_read_idx_map
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18904 Assertion `m_part_spec.start_part >= m_part_spec.end_part' failed in ha_partition::index_read_idx_map
|
||||
--echo #
|
||||
CREATE TABLE t1 (a INT, UNIQUE USING HASH (a)) PARTITION BY HASH (a) PARTITIONS 2;
|
||||
INSERT INTO t1 VALUES (2);
|
||||
REPLACE INTO t1 VALUES (2);
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# MDEV-18820 Assertion `lock_table_has(trx, index->table, LOCK_IX)' failed in lock_rec_insert_check_and_lock upon INSERT into table with blob key'
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18820 Assertion `lock_table_has(trx, index->table, LOCK_IX)' failed in lock_rec_insert_check_and_lock upon INSERT into table with blob key'
|
||||
--echo #
|
||||
|
||||
set innodb_lock_wait_timeout= 10;
|
||||
|
||||
|
@ -317,15 +318,15 @@ INSERT IGNORE INTO t1 VALUES (4, 1)/*4*/;
|
|||
--connection default
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
#
|
||||
# MDEV-18791 Wrong error upon creating Aria table with long index on BLOB
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-18791 Wrong error upon creating Aria table with long index on BLOB
|
||||
--echo #
|
||||
--error ER_TOO_LONG_KEY
|
||||
CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
|
||||
|
||||
#
|
||||
# MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
|
||||
--echo #
|
||||
create table t1(a int, unique(a) using hash);
|
||||
--let $count=150
|
||||
--let insert_stmt= insert into t1 values(200)
|
||||
|
@ -340,9 +341,9 @@ while ($count)
|
|||
--enable_query_log
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB
|
||||
--echo #
|
||||
|
||||
--source include/have_binlog_format_row.inc
|
||||
SET binlog_row_image= NOBLOB;
|
||||
|
@ -352,20 +353,17 @@ INSERT INTO t1 VALUES (1,'foo');
|
|||
create table t2(id int primary key, a blob, b varchar(20) as (LEFT(a,2)));
|
||||
INSERT INTO t2 VALUES (1, 'foo', default);
|
||||
|
||||
# Cleanup
|
||||
DROP TABLE t1, t2;
|
||||
SET binlog_row_image= FULL;
|
||||
|
||||
#
|
||||
# MDEV-22719 Long unique keys are not created when individual key_part->length < max_key_length but SUM(key_parts->length) > max_key_length
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-22719 Long unique keys are not created when individual key_part->length < max_key_length but SUM(key_parts->length) > max_key_length
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (a int, b VARCHAR(1000), UNIQUE (a,b)) ENGINE=MyISAM;
|
||||
show index from t1;
|
||||
CREATE TABLE t2 (a varchar(900), b VARCHAR(900), UNIQUE (a,b)) ENGINE=MyISAM;
|
||||
show index from t2;
|
||||
|
||||
# Cleanup
|
||||
DROP TABLE t1,t2;
|
||||
|
||||
--echo #
|
||||
|
@ -397,9 +395,9 @@ update t1 set v2 = 1, v3 = -128;
|
|||
update t1,t2 set v1 = v2 , v5 = 0;
|
||||
drop table t1, t2;
|
||||
|
||||
#
|
||||
# MDEV-23264 Unique blobs allow duplicate values upon UPDATE
|
||||
#
|
||||
--echo #
|
||||
--echo # MDEV-23264 Unique blobs allow duplicate values upon UPDATE
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (f TEXT UNIQUE);
|
||||
INSERT INTO t1 VALUES (NULL),(NULL);
|
||||
|
@ -435,6 +433,16 @@ alter table t1 reorganize partition p1 into (
|
|||
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29199 Unique hash key is ignored upon INSERT ... SELECT into non-empty MyISAM table
|
||||
--echo #
|
||||
create table t1 (a int, b text, unique(b)) engine=MyISAM;
|
||||
insert into t1 values (0,'aa');
|
||||
--error ER_DUP_ENTRY
|
||||
insert into t1 (a,b) select 1,'xxx' from seq_1_to_5;
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
|
|
@ -354,7 +354,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
|
|||
o_totalprice between 200000 and 230000;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using index condition
|
||||
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
|
||||
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) Using where; Using rowid filter
|
||||
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
|
||||
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
|
||||
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
|
||||
|
@ -391,6 +391,14 @@ EXPLAIN
|
|||
"key_length": "4",
|
||||
"used_key_parts": ["o_orderkey"],
|
||||
"ref": ["dbt3_s001.lineitem.l_orderkey"],
|
||||
"rowid_filter": {
|
||||
"range": {
|
||||
"key": "i_o_totalprice",
|
||||
"used_key_parts": ["o_totalprice"]
|
||||
},
|
||||
"rows": 69,
|
||||
"selectivity_pct": 4.6
|
||||
},
|
||||
"rows": 1,
|
||||
"filtered": 4.599999905,
|
||||
"attached_condition": "orders.o_totalprice between 200000 and 230000"
|
||||
|
@ -405,7 +413,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
|
|||
o_totalprice between 200000 and 230000;
|
||||
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
|
||||
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition
|
||||
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 4.60 11.22 Using where
|
||||
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) 0.11 (10%) 4.60 100.00 Using where; Using rowid filter
|
||||
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
|
||||
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
|
||||
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
|
||||
|
@ -449,13 +457,26 @@ ANALYZE
|
|||
"key_length": "4",
|
||||
"used_key_parts": ["o_orderkey"],
|
||||
"ref": ["dbt3_s001.lineitem.l_orderkey"],
|
||||
"rowid_filter": {
|
||||
"range": {
|
||||
"key": "i_o_totalprice",
|
||||
"used_key_parts": ["o_totalprice"]
|
||||
},
|
||||
"rows": 69,
|
||||
"selectivity_pct": 4.6,
|
||||
"r_rows": 71,
|
||||
"r_lookups": 96,
|
||||
"r_selectivity_pct": 10.41666667,
|
||||
"r_buffer_size": "REPLACED",
|
||||
"r_filling_time_ms": "REPLACED"
|
||||
},
|
||||
"r_loops": 98,
|
||||
"rows": 1,
|
||||
"r_rows": 1,
|
||||
"r_rows": 0.112244898,
|
||||
"r_table_time_ms": "REPLACED",
|
||||
"r_other_time_ms": "REPLACED",
|
||||
"filtered": 4.599999905,
|
||||
"r_filtered": 11.2244898,
|
||||
"r_filtered": 100,
|
||||
"attached_condition": "orders.o_totalprice between 200000 and 230000"
|
||||
}
|
||||
}
|
||||
|
@ -615,7 +636,7 @@ l_quantity > 45 AND
|
|||
o_totalprice between 180000 and 230000;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) Using index condition; Using where; Using rowid filter
|
||||
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
|
||||
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) Using where; Using rowid filter
|
||||
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
|
||||
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
|
||||
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
|
||||
|
@ -663,6 +684,14 @@ EXPLAIN
|
|||
"key_length": "4",
|
||||
"used_key_parts": ["o_orderkey"],
|
||||
"ref": ["dbt3_s001.lineitem.l_orderkey"],
|
||||
"rowid_filter": {
|
||||
"range": {
|
||||
"key": "i_o_totalprice",
|
||||
"used_key_parts": ["o_totalprice"]
|
||||
},
|
||||
"rows": 139,
|
||||
"selectivity_pct": 9.266666667
|
||||
},
|
||||
"rows": 1,
|
||||
"filtered": 9.266666412,
|
||||
"attached_condition": "orders.o_totalprice between 180000 and 230000"
|
||||
|
@ -678,7 +707,7 @@ l_quantity > 45 AND
|
|||
o_totalprice between 180000 and 230000;
|
||||
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
|
||||
1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) 60.00 (11%) 11.69 100.00 Using index condition; Using where; Using rowid filter
|
||||
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 9.27 26.67 Using where
|
||||
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) 0.27 (25%) 9.27 100.00 Using where; Using rowid filter
|
||||
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
|
||||
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
|
||||
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
|
||||
|
@ -738,13 +767,26 @@ ANALYZE
|
|||
"key_length": "4",
|
||||
"used_key_parts": ["o_orderkey"],
|
||||
"ref": ["dbt3_s001.lineitem.l_orderkey"],
|
||||
"rowid_filter": {
|
||||
"range": {
|
||||
"key": "i_o_totalprice",
|
||||
"used_key_parts": ["o_totalprice"]
|
||||
},
|
||||
"rows": 139,
|
||||
"selectivity_pct": 9.266666667,
|
||||
"r_rows": 144,
|
||||
"r_lookups": 59,
|
||||
"r_selectivity_pct": 25.42372881,
|
||||
"r_buffer_size": "REPLACED",
|
||||
"r_filling_time_ms": "REPLACED"
|
||||
},
|
||||
"r_loops": 60,
|
||||
"rows": 1,
|
||||
"r_rows": 1,
|
||||
"r_rows": 0.266666667,
|
||||
"r_table_time_ms": "REPLACED",
|
||||
"r_other_time_ms": "REPLACED",
|
||||
"filtered": 9.266666412,
|
||||
"r_filtered": 26.66666667,
|
||||
"r_filtered": 100,
|
||||
"attached_condition": "orders.o_totalprice between 180000 and 230000"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3616,7 +3616,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2,t3
|
||||
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
|
||||
|
@ -3624,7 +3624,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
|
||||
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
|
||||
|
@ -3632,7 +3632,7 @@ t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2,t3
|
||||
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
|
||||
|
@ -3640,7 +3640,7 @@ t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
|
||||
DROP TABLE t1,t2,t3;
|
||||
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
|
||||
CREATE TABLE t2 ( f11 int PRIMARY KEY );
|
||||
|
@ -5639,4 +5639,60 @@ EXECUTE stmt;
|
|||
COUNT(DISTINCT a)
|
||||
3
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-29294: Assertion `functype() == ((Item_cond *) new_item)->functype()'
|
||||
# failed in Item_cond::remove_eq_conds on SELECT
|
||||
#
|
||||
CREATE TABLE t1 (a INT);
|
||||
INSERT INTO t1 VALUES (1),(2),(3);
|
||||
# Test for nested OR conditions:
|
||||
SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
a
|
||||
1
|
||||
EXPLAIN EXTENDED
|
||||
SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using temporary
|
||||
3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
|
||||
Warnings:
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
|
||||
Note 1249 Select 2 was reduced during optimization
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1 and (1 or <expr_cache><`test`.`t1`.`a`>((/* select#3 */ select 3 from DUAL where `test`.`t1`.`a` = `test`.`t1`.`a`)) = 3)
|
||||
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3))';
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
SELECT * FROM v1;
|
||||
a
|
||||
1
|
||||
# Test for nested AND conditions:
|
||||
SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
|
||||
a
|
||||
1
|
||||
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3))';
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
CREATE VIEW v2 AS SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
|
||||
SELECT * FROM v2;
|
||||
a
|
||||
1
|
||||
DROP TABLE t1;
|
||||
DROP VIEW v1, v2;
|
||||
End of 10.0 tests
|
||||
|
|
|
@ -4747,4 +4747,44 @@ EXECUTE stmt;
|
|||
--enable_warnings
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29294: Assertion `functype() == ((Item_cond *) new_item)->functype()'
|
||||
--echo # failed in Item_cond::remove_eq_conds on SELECT
|
||||
--echo #
|
||||
CREATE TABLE t1 (a INT);
|
||||
INSERT INTO t1 VALUES (1),(2),(3);
|
||||
|
||||
--echo # Test for nested OR conditions:
|
||||
SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
|
||||
EXPLAIN EXTENDED
|
||||
SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
|
||||
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3))';
|
||||
EXECUTE stmt;
|
||||
EXECUTE stmt;
|
||||
|
||||
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
SELECT * FROM v1;
|
||||
|
||||
--echo # Test for nested AND conditions:
|
||||
SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
|
||||
|
||||
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3))';
|
||||
EXECUTE stmt;
|
||||
EXECUTE stmt;
|
||||
|
||||
CREATE VIEW v2 AS SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
|
||||
SELECT * FROM v2;
|
||||
|
||||
DROP TABLE t1;
|
||||
DROP VIEW v1, v2;
|
||||
|
||||
--echo End of 10.0 tests
|
||||
|
|
|
@ -3627,7 +3627,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2,t3
|
||||
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
|
||||
|
@ -3635,7 +3635,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
|
||||
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
|
||||
|
@ -3643,7 +3643,7 @@ t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where; Rowid-ordered scan
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2,t3
|
||||
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
|
||||
|
@ -3651,7 +3651,7 @@ t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where; Rowid-ordered scan
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
|
||||
DROP TABLE t1,t2,t3;
|
||||
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
|
||||
CREATE TABLE t2 ( f11 int PRIMARY KEY );
|
||||
|
@ -5650,6 +5650,62 @@ EXECUTE stmt;
|
|||
COUNT(DISTINCT a)
|
||||
3
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-29294: Assertion `functype() == ((Item_cond *) new_item)->functype()'
|
||||
# failed in Item_cond::remove_eq_conds on SELECT
|
||||
#
|
||||
CREATE TABLE t1 (a INT);
|
||||
INSERT INTO t1 VALUES (1),(2),(3);
|
||||
# Test for nested OR conditions:
|
||||
SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
a
|
||||
1
|
||||
EXPLAIN EXTENDED
|
||||
SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using temporary
|
||||
3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
|
||||
Warnings:
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
|
||||
Note 1249 Select 2 was reduced during optimization
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1 and (1 or <expr_cache><`test`.`t1`.`a`>((/* select#3 */ select 3 from DUAL where `test`.`t1`.`a` = `test`.`t1`.`a`)) = 3)
|
||||
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3))';
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
SELECT * FROM v1;
|
||||
a
|
||||
1
|
||||
# Test for nested AND conditions:
|
||||
SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
|
||||
a
|
||||
1
|
||||
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3))';
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
CREATE VIEW v2 AS SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
|
||||
SELECT * FROM v2;
|
||||
a
|
||||
1
|
||||
DROP TABLE t1;
|
||||
DROP VIEW v1, v2;
|
||||
End of 10.0 tests
|
||||
set join_cache_level=default;
|
||||
set @@optimizer_switch=@save_optimizer_switch_jcl6;
|
||||
|
|
|
@ -3616,7 +3616,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2,t3
|
||||
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
|
||||
|
@ -3624,7 +3624,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
|
||||
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
|
||||
|
@ -3632,7 +3632,7 @@ t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
|
||||
EXPLAIN
|
||||
SELECT t3.a FROM t1,t2,t3
|
||||
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
|
||||
|
@ -3640,7 +3640,7 @@ t3.c IN ('bb','ee');
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
|
||||
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where
|
||||
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
|
||||
1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
|
||||
DROP TABLE t1,t2,t3;
|
||||
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
|
||||
CREATE TABLE t2 ( f11 int PRIMARY KEY );
|
||||
|
@ -5639,4 +5639,60 @@ EXECUTE stmt;
|
|||
COUNT(DISTINCT a)
|
||||
3
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-29294: Assertion `functype() == ((Item_cond *) new_item)->functype()'
|
||||
# failed in Item_cond::remove_eq_conds on SELECT
|
||||
#
|
||||
CREATE TABLE t1 (a INT);
|
||||
INSERT INTO t1 VALUES (1),(2),(3);
|
||||
# Test for nested OR conditions:
|
||||
SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
a
|
||||
1
|
||||
EXPLAIN EXTENDED
|
||||
SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using temporary
|
||||
3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
|
||||
Warnings:
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
|
||||
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
|
||||
Note 1249 Select 2 was reduced during optimization
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1 and (1 or <expr_cache><`test`.`t1`.`a`>((/* select#3 */ select 3 from DUAL where `test`.`t1`.`a` = `test`.`t1`.`a`)) = 3)
|
||||
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3))';
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = 1 AND
|
||||
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
|
||||
SELECT * FROM v1;
|
||||
a
|
||||
1
|
||||
# Test for nested AND conditions:
|
||||
SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
|
||||
a
|
||||
1
|
||||
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3))';
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
EXECUTE stmt;
|
||||
a
|
||||
1
|
||||
CREATE VIEW v2 AS SELECT * FROM t1 WHERE a = 1 OR
|
||||
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
|
||||
SELECT * FROM v2;
|
||||
a
|
||||
1
|
||||
DROP TABLE t1;
|
||||
DROP VIEW v1, v2;
|
||||
End of 10.0 tests
|
||||
|
|
|
@ -132,7 +132,7 @@ id select_type table type possible_keys key key_len ref rows Extra
|
|||
1 PRIMARY t3_b eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3_a.PARENTID 1 Using where
|
||||
1 PRIMARY t3_c eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3_b.PARENTID 1 Using where
|
||||
1 PRIMARY t3_d eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3_c.PARENTID 1 Using where
|
||||
1 PRIMARY t3_e eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3_d.PARENTID 1 Using where
|
||||
1 PRIMARY t3_e ref|filter PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX FFOLDERID_IDX|CMFLDRPARNT_IDX 34|35 test.t3_d.PARENTID 1 (29%) Using where; Using rowid filter
|
||||
drop table t1, t2, t3, t4;
|
||||
CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB;
|
||||
INSERT INTO t1 VALUES (1),(2);
|
||||
|
|
|
@ -1320,6 +1320,28 @@ CASE WHEN a THEN DEFAULT(a) END
|
|||
DROP TABLE t1;
|
||||
SET timestamp=DEFAULT;
|
||||
#
|
||||
# MDEV-27653 long uniques don't work with unicode collations
|
||||
#
|
||||
CREATE TABLE t1 (a timestamp, UNIQUE KEY(a) USING HASH);
|
||||
SET time_zone='+00:00';
|
||||
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
|
||||
SET time_zone='+01:00';
|
||||
INSERT INTO t1 SELECT MAX(a) FROM t1;
|
||||
ERROR 23000: Duplicate entry '2001-01-01 11:20:30' for key 'a'
|
||||
SELECT * FROM t1;
|
||||
a
|
||||
2001-01-01 11:20:30
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (a timestamp, UNIQUE KEY(a) USING HASH);
|
||||
SET time_zone='+00:00';
|
||||
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
|
||||
SET time_zone='+01:00';
|
||||
CHECK TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 check status OK
|
||||
DROP TABLE t1;
|
||||
SET time_zone=DEFAULT;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
#
|
||||
|
|
|
@ -877,6 +877,27 @@ SELECT CASE WHEN a THEN DEFAULT(a) END FROM t1;
|
|||
DROP TABLE t1;
|
||||
SET timestamp=DEFAULT;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-27653 long uniques don't work with unicode collations
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (a timestamp, UNIQUE KEY(a) USING HASH);
|
||||
SET time_zone='+00:00';
|
||||
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
|
||||
SET time_zone='+01:00';
|
||||
--error ER_DUP_ENTRY
|
||||
INSERT INTO t1 SELECT MAX(a) FROM t1;
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
CREATE TABLE t1 (a timestamp, UNIQUE KEY(a) USING HASH);
|
||||
SET time_zone='+00:00';
|
||||
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
|
||||
SET time_zone='+01:00';
|
||||
CHECK TABLE t1;
|
||||
DROP TABLE t1;
|
||||
SET time_zone=DEFAULT;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
|
|
@ -4417,6 +4417,46 @@ pk a bit_or
|
|||
DROP TABLE t2;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
|
||||
#
|
||||
CREATE TABLE t1 (i1 int, a int);
|
||||
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
|
||||
CREATE TABLE t2 (i2 int);
|
||||
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
|
||||
SELECT
|
||||
a,
|
||||
RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
FROM
|
||||
t1, t2 WHERE t2.i2 = t1.i1
|
||||
GROUP BY
|
||||
a;
|
||||
a RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
DROP TABLE t1, t2;
|
||||
#
|
||||
# MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
|
||||
#
|
||||
CREATE TABLE t1 (UID BIGINT);
|
||||
CREATE TABLE t2 (UID BIGINT);
|
||||
CREATE TABLE t3 (UID BIGINT);
|
||||
insert into t1 VALUES (1),(2);
|
||||
insert into t2 VALUES (1),(2);
|
||||
insert into t3 VALUES (1),(2);
|
||||
SELECT
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
FROM t1 TT1,
|
||||
t2 TT2,
|
||||
t3 TT3
|
||||
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
|
||||
GROUP BY TT1.UID
|
||||
;
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
1
|
||||
1
|
||||
DROP TABLE t1, t2, t3;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
#
|
||||
|
|
|
@ -2816,6 +2816,46 @@ DROP TABLE t2;
|
|||
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (i1 int, a int);
|
||||
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
|
||||
|
||||
CREATE TABLE t2 (i2 int);
|
||||
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
|
||||
|
||||
SELECT
|
||||
a,
|
||||
RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
FROM
|
||||
t1, t2 WHERE t2.i2 = t1.i1
|
||||
GROUP BY
|
||||
a;
|
||||
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
|
||||
--echo #
|
||||
CREATE TABLE t1 (UID BIGINT);
|
||||
CREATE TABLE t2 (UID BIGINT);
|
||||
CREATE TABLE t3 (UID BIGINT);
|
||||
|
||||
insert into t1 VALUES (1),(2);
|
||||
insert into t2 VALUES (1),(2);
|
||||
insert into t3 VALUES (1),(2);
|
||||
SELECT
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
FROM t1 TT1,
|
||||
t2 TT2,
|
||||
t3 TT3
|
||||
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
|
||||
GROUP BY TT1.UID
|
||||
;
|
||||
|
||||
DROP TABLE t1, t2, t3;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
|
|
|
@ -24,3 +24,66 @@ pk count(a) over (order by pk rows between 2 preceding and 2 following)
|
|||
28 5
|
||||
27 5
|
||||
drop table t0,t1;
|
||||
#
|
||||
# MDEV-30052: Crash with a query containing nested WINDOW clauses
|
||||
#
|
||||
CREATE TABLE t1 (c INT);
|
||||
insert into t1 values (1),(2);
|
||||
UPDATE t1 SET c=1
|
||||
WHERE c=2
|
||||
ORDER BY
|
||||
(1 IN ((
|
||||
SELECT *
|
||||
FROM (SELECT * FROM t1) AS v1
|
||||
GROUP BY c
|
||||
WINDOW v2 AS (ORDER BY
|
||||
(SELECT *
|
||||
FROM t1
|
||||
GROUP BY c
|
||||
WINDOW v3 AS (PARTITION BY c)
|
||||
)
|
||||
)
|
||||
))
|
||||
);
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-29359: Server crashed with heap-use-after-free in
|
||||
# Field::is_null(long long) const (Just testcase)
|
||||
#
|
||||
CREATE TABLE t1 (id int);
|
||||
INSERT INTO t1 VALUES (-1),(0),(84);
|
||||
SELECT
|
||||
id IN (SELECT id
|
||||
FROM t1
|
||||
WINDOW w AS (ORDER BY (SELECT 1
|
||||
FROM t1
|
||||
WHERE
|
||||
EXISTS ( SELECT id
|
||||
FROM t1
|
||||
GROUP BY id
|
||||
WINDOW w2 AS (ORDER BY id)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
FROM t1;
|
||||
id IN (SELECT id
|
||||
FROM t1
|
||||
WINDOW w AS (ORDER BY (SELECT 1
|
||||
FROM t1
|
||||
WHERE
|
||||
EXISTS ( SELECT id
|
||||
FROM t1
|
||||
GROUP BY id
|
||||
WINDOW w2 AS (ORDER BY id)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
1
|
||||
1
|
||||
1
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
|
|
|
@ -33,3 +33,58 @@ limit 4;
|
|||
--disable_view_protocol
|
||||
|
||||
drop table t0,t1;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-30052: Crash with a query containing nested WINDOW clauses
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (c INT);
|
||||
insert into t1 values (1),(2);
|
||||
UPDATE t1 SET c=1
|
||||
WHERE c=2
|
||||
ORDER BY
|
||||
(1 IN ((
|
||||
SELECT *
|
||||
FROM (SELECT * FROM t1) AS v1
|
||||
GROUP BY c
|
||||
WINDOW v2 AS (ORDER BY
|
||||
(SELECT *
|
||||
FROM t1
|
||||
GROUP BY c
|
||||
WINDOW v3 AS (PARTITION BY c)
|
||||
)
|
||||
)
|
||||
))
|
||||
);
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29359: Server crashed with heap-use-after-free in
|
||||
--echo # Field::is_null(long long) const (Just testcase)
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (id int);
|
||||
INSERT INTO t1 VALUES (-1),(0),(84);
|
||||
|
||||
SELECT
|
||||
id IN (SELECT id
|
||||
FROM t1
|
||||
WINDOW w AS (ORDER BY (SELECT 1
|
||||
FROM t1
|
||||
WHERE
|
||||
EXISTS ( SELECT id
|
||||
FROM t1
|
||||
GROUP BY id
|
||||
WINDOW w2 AS (ORDER BY id)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
FROM t1;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
--echo #
|
||||
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,53 @@
|
|||
RESET MASTER;
|
||||
connect pause_purge,localhost,root;
|
||||
START TRANSACTION WITH CONSISTENT SNAPSHOT;
|
||||
connection default;
|
||||
CREATE TABLE t (pk int PRIMARY KEY, sk INT UNIQUE) ENGINE=InnoDB;
|
||||
INSERT INTO t VALUES (10, 100);
|
||||
connect con1,localhost,root;
|
||||
BEGIN;
|
||||
SELECT * FROM t WHERE sk = 100 FOR UPDATE;
|
||||
pk sk
|
||||
10 100
|
||||
connect con2,localhost,root;
|
||||
SET DEBUG_SYNC="lock_wait_start SIGNAL insert_wait_started";
|
||||
INSERT INTO t VALUES (5, 100) # trx 1;
|
||||
connect con3,localhost,root;
|
||||
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SET DEBUG_SYNC="now WAIT_FOR insert_wait_started";
|
||||
SET DEBUG_SYNC="lock_wait_start SIGNAL delete_started_waiting";
|
||||
BEGIN;
|
||||
UPDATE t SET sk = 200 WHERE sk = 100; # trx 2;
|
||||
connection con1;
|
||||
SET DEBUG_SYNC="now WAIT_FOR delete_started_waiting";
|
||||
DELETE FROM t WHERE sk=100;
|
||||
COMMIT;
|
||||
disconnect con1;
|
||||
connection con2;
|
||||
disconnect con2;
|
||||
connection con3;
|
||||
must be logged in ROW format as the only event of trx 2 (con3)
|
||||
INSERT INTO t VALUES (11, 101);
|
||||
COMMIT;
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Query # # use `test`; DELETE FROM t WHERE sk=100
|
||||
master-bin.000001 # Xid # # COMMIT /* XID */
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Query # # use `test`; INSERT INTO t VALUES (5, 100) # trx 1
|
||||
master-bin.000001 # Xid # # COMMIT /* XID */
|
||||
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
|
||||
master-bin.000001 # Annotate_rows # # INSERT INTO t VALUES (11, 101)
|
||||
master-bin.000001 # Table_map # # table_id: # (test.t)
|
||||
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Xid # # COMMIT /* XID */
|
||||
disconnect con3;
|
||||
connection default;
|
||||
SELECT * FROM t;
|
||||
pk sk
|
||||
5 100
|
||||
11 101
|
||||
disconnect pause_purge;
|
||||
SET DEBUG_SYNC="RESET";
|
||||
DROP TABLE t;
|
|
@ -0,0 +1,92 @@
|
|||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
--source include/have_debug_sync.inc
|
||||
--source include/have_binlog_format_mixed.inc
|
||||
--source include/count_sessions.inc
|
||||
|
||||
RESET MASTER;
|
||||
|
||||
# MDEV-30010 merely adds a Read-Committed version of the MDEV-30225 test
|
||||
# solely to prove the RC isolation yields ROW binlog format as it is
|
||||
# supposed to:
|
||||
# https://mariadb.com/kb/en/unsafe-statements-for-statement-based-replication/#isolation-levels.
|
||||
# The original MDEV-30225 test is adapted to the RC to create
|
||||
# a similarly sophisticated scenario which, however, does not lead to any deadlock.
|
||||
|
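# A minimal sketch of the documented rule this test relies on (illustrative
# only; the table name "demo" is hypothetical and is not created here):
# under binlog_format=MIXED, InnoDB DML executed at READ COMMITTED is expected
# to be binlogged in ROW format, e.g.
#   SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
#   CREATE TABLE demo (a INT PRIMARY KEY) ENGINE=InnoDB;
#   INSERT INTO demo VALUES (1);  -- logged as Table_map + Write_rows events
#   DROP TABLE demo;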
||||
--connect (pause_purge,localhost,root)
|
||||
START TRANSACTION WITH CONSISTENT SNAPSHOT;
|
||||
|
||||
--connection default
|
||||
CREATE TABLE t (pk int PRIMARY KEY, sk INT UNIQUE) ENGINE=InnoDB;
|
||||
INSERT INTO t VALUES (10, 100);
|
||||
|
||||
--connect (con1,localhost,root)
|
||||
BEGIN; # trx 0
|
||||
SELECT * FROM t WHERE sk = 100 FOR UPDATE;
|
||||
|
||||
--connect (con2,localhost,root)
|
||||
SET DEBUG_SYNC="lock_wait_start SIGNAL insert_wait_started";
|
||||
# trx 1 is blocked trying to read the record in the secondary index during the
|
||||
# duplicate check. It is the first in the waiting queue, which is why it is woken up first
|
||||
# when trx 0 commits.
|
||||
--send INSERT INTO t VALUES (5, 100) # trx 1
|
||||
|
||||
--connect (con3,localhost,root)
|
||||
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
|
||||
SET DEBUG_SYNC="now WAIT_FOR insert_wait_started";
|
||||
SET DEBUG_SYNC="lock_wait_start SIGNAL delete_started_waiting";
|
||||
# trx 2 can delete (5, 100) on the master, but not on the slave, as on the slave
|
||||
# trx 1 can insert (5, 100) after trx 2 has positioned its cursor. Trx 2's lock is
|
||||
# placed in the waiting queue after trx 1's lock, but its persistent cursor position
|
||||
# was stored on the (100, 10) record in the secondary index before suspending. After
|
||||
# trx 1 is committed, trx 2 restores its persistent cursor position on (100, 10). As
|
||||
# the (100, 5) secondary index record was inserted before (100, 10) in logical
|
||||
# order, and the (100, 10) record is delete-marked, trx 2 simply continues scanning.
|
||||
#
|
||||
# Note: there can be several records with the same key in a unique secondary
|
||||
# index, but at most one of them may be non-delete-marked. That is why, for a
|
||||
# point query, the cursor is positioned on the first record in logical order, and
|
||||
# records are then iterated until either a non-delete-marked record is found or
|
||||
# all records with the same unique fields have been iterated.
|
||||
|
||||
# prepare to show the interesting binlog events
|
||||
--let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1)
|
||||
--let $binlog_file=query_get_value(SHOW MASTER STATUS, File, 1)
|
||||
BEGIN;
|
||||
--send UPDATE t SET sk = 200 WHERE sk = 100; # trx 2
|
||||
|
||||
--connection con1
|
||||
SET DEBUG_SYNC="now WAIT_FOR delete_started_waiting";
|
||||
DELETE FROM t WHERE sk=100; # trx 0
|
||||
COMMIT;
|
||||
--disconnect con1
|
||||
|
||||
--connection con2
|
||||
--reap
|
||||
--disconnect con2
|
||||
|
||||
--connection con3
|
||||
--error 0
|
||||
--reap
|
||||
if (`SELECT ROW_COUNT() > 0`)
|
||||
{
|
||||
--echo unexpected effective UPDATE
|
||||
--die
|
||||
}
|
||||
--echo must be logged in ROW format as the only event of trx 2 (con3)
|
||||
INSERT INTO t VALUES (11, 101);
|
||||
COMMIT;
|
||||
--source include/show_binlog_events.inc
|
||||
--disconnect con3
|
||||
|
||||
--connection default
|
||||
# If the bug is not fixed, we will see the row inserted by trx 1 here. This can
|
||||
# cause a duplicate key error on the slave when some other trx tries to insert a
|
||||
# row with the same secondary key as was inserted by trx 1 and not deleted by trx
|
||||
# 2.
|
||||
SELECT * FROM t;
|
||||
|
||||
--disconnect pause_purge
|
||||
SET DEBUG_SYNC="RESET";
|
||||
DROP TABLE t;
|
||||
--source include/wait_until_count_sessions.inc
|
|
@ -4,6 +4,7 @@ call mtr.add_suppression("InnoDB: Recovery cannot access file");
|
|||
call mtr.add_suppression("InnoDB: Plugin initialization aborted");
|
||||
call mtr.add_suppression("Plugin 'InnoDB' init function returned error\\.");
|
||||
call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed.");
|
||||
call mtr.add_suppression("InnoDB: (Unable to apply log to|Discarding log for) corrupted page ");
|
||||
call mtr.add_suppression("InnoDB: Cannot apply log to \\[page id: space=[1-9][0-9]*, page number=0\\] of corrupted file '.*test.t[1-5]\\.ibd'");
|
||||
call mtr.add_suppression("InnoDB: Failed to read page .* from file '.*'");
|
||||
call mtr.add_suppression("InnoDB: OPT_PAGE_CHECKSUM mismatch");
|
||||
|
|
|
@ -4423,6 +4423,46 @@ pk a bit_or
|
|||
DROP TABLE t2;
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
|
||||
#
|
||||
CREATE TABLE t1 (i1 int, a int);
|
||||
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
|
||||
CREATE TABLE t2 (i2 int);
|
||||
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
|
||||
SELECT
|
||||
a,
|
||||
RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
FROM
|
||||
t1, t2 WHERE t2.i2 = t1.i1
|
||||
GROUP BY
|
||||
a;
|
||||
a RANK() OVER (ORDER BY SUM(DISTINCT i1))
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
DROP TABLE t1, t2;
|
||||
#
|
||||
# MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
|
||||
#
|
||||
CREATE TABLE t1 (UID BIGINT);
|
||||
CREATE TABLE t2 (UID BIGINT);
|
||||
CREATE TABLE t3 (UID BIGINT);
|
||||
insert into t1 VALUES (1),(2);
|
||||
insert into t2 VALUES (1),(2);
|
||||
insert into t3 VALUES (1),(2);
|
||||
SELECT
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
FROM t1 TT1,
|
||||
t2 TT2,
|
||||
t3 TT3
|
||||
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
|
||||
GROUP BY TT1.UID
|
||||
;
|
||||
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
|
||||
1
|
||||
1
|
||||
DROP TABLE t1, t2, t3;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
||||
#
|
||||
|
|
|
@ -9,6 +9,7 @@ call mtr.add_suppression("InnoDB: Recovery cannot access file");
|
|||
call mtr.add_suppression("InnoDB: Plugin initialization aborted");
|
||||
call mtr.add_suppression("Plugin 'InnoDB' init function returned error\\.");
|
||||
call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed.");
|
||||
call mtr.add_suppression("InnoDB: (Unable to apply log to|Discarding log for) corrupted page ");
|
||||
call mtr.add_suppression("InnoDB: Cannot apply log to \\[page id: space=[1-9][0-9]*, page number=0\\] of corrupted file '.*test.t[1-5]\\.ibd'");
|
||||
call mtr.add_suppression("InnoDB: Failed to read page .* from file '.*'");
|
||||
call mtr.add_suppression("InnoDB: OPT_PAGE_CHECKSUM mismatch");
|
||||
|
|
|
@ -144,7 +144,6 @@ SET debug_sync='RESET';
|
|||
connection node_1;
|
||||
SET GLOBAL wsrep_slave_threads = DEFAULT;
|
||||
connection node_2;
|
||||
SET SESSION wsrep_sync_wait=15;
|
||||
SELECT * FROM t1;
|
||||
f1 f2 f3
|
||||
1 1 0
|
||||
|
|
|
@ -140,6 +140,14 @@ SELECT * FROM t1;
|
|||
# original state in node 1
|
||||
INSERT INTO t1 VALUES (7,7,7);
|
||||
INSERT INTO t1 VALUES (8,8,8);
|
||||
SELECT COUNT(*) FROM t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
--connection node_1
|
||||
--let $wait_condition = SELECT COUNT(*) = 7 FROM t1
|
||||
--source include/wait_condition.inc
|
||||
SELECT COUNT(*) FROM t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
||||
|
@ -268,7 +276,6 @@ SET debug_sync='RESET';
|
|||
SET GLOBAL wsrep_slave_threads = DEFAULT;
|
||||
|
||||
--connection node_2
|
||||
SET SESSION wsrep_sync_wait=15;
|
||||
SELECT * FROM t1;
|
||||
|
||||
# replicate some transactions, so that wsrep slave thread count can reach
|
||||
|
@ -276,4 +283,13 @@ SELECT * FROM t1;
|
|||
INSERT INTO t1 VALUES (7,7,7);
|
||||
INSERT INTO t1 VALUES (8,8,8);
|
||||
|
||||
SELECT COUNT(*) FROM t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
--connection node_1
|
||||
--let $wait_condition = SELECT COUNT(*) = 7 FROM t1
|
||||
--source include/wait_condition.inc
|
||||
SELECT COUNT(*) FROM t1;
|
||||
SELECT * FROM t1;
|
||||
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -205,8 +205,3 @@ CALL mtr.add_suppression("conflict state 7 after post commit");
|
|||
|
||||
# Warning happens when the cluster is started for the first time
|
||||
CALL mtr.add_suppression("Skipped GCache ring buffer recovery");
|
||||
|
||||
--connection node_2
|
||||
call mtr.add_suppression("Error in Log_event::read_log_event():.*");
|
||||
CALL mtr.add_suppression("Skipped GCache ring buffer recovery");
|
||||
|
||||
|
|
30
mysql-test/suite/mariabackup/incremental_drop_db.result
Normal file
|
@ -0,0 +1,30 @@
|
|||
call mtr.add_suppression("InnoDB: New log files created");
|
||||
#
|
||||
# Start of 10.3 tests
|
||||
#
|
||||
#
|
||||
# MDEV-23335 MariaBackup Incremental Does Not Reflect Dropped/Created Databases
|
||||
#
|
||||
CREATE DATABASE db1;
|
||||
CREATE DATABASE db2;
|
||||
CREATE TABLE db1.t1 (a INT) ENGINE=MyISAM;
|
||||
CREATE TABLE db1.t2 (a INT) ENGINE=InnoDB;
|
||||
# Create base backup
|
||||
DROP DATABASE db1;
|
||||
# Create incremental backup
|
||||
# Remove incremental_dir/db2/db.opt file to make incremental_dir/db2/ empty
|
||||
# Prepare base backup, apply incremental one
|
||||
# shutdown server
|
||||
# remove datadir
|
||||
# xtrabackup move back
|
||||
# restart
|
||||
# Expect no 'db1' in the output, because it was really dropped.
|
||||
# Expect 'db2' in the output, because it was not dropped!
|
||||
# (its incremental directory was emptied only)
|
||||
SHOW DATABASES LIKE 'db%';
|
||||
Database (db%)
|
||||
db2
|
||||
DROP DATABASE db2;
|
||||
#
|
||||
# End of 10.3 tests
|
||||
#
|
68
mysql-test/suite/mariabackup/incremental_drop_db.test
Normal file
|
@ -0,0 +1,68 @@
|
|||
--source include/have_innodb.inc
|
||||
call mtr.add_suppression("InnoDB: New log files created");
|
||||
|
||||
--echo #
|
||||
--echo # Start of 10.3 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-23335 MariaBackup Incremental Does Not Reflect Dropped/Created Databases
|
||||
--echo #
|
||||
|
||||
--let $datadir=`SELECT @@datadir`
|
||||
--let $basedir=$MYSQLTEST_VARDIR/tmp/backup
|
||||
--let $incremental_dir=$MYSQLTEST_VARDIR/tmp/backup_inc1
|
||||
|
||||
# Create two databases:
|
||||
# - db1 is dropped normally below
|
||||
# - db2 is used to cover a corner case: its db.opt file is removed
|
||||
|
||||
# Incremental backup contains:
|
||||
# - no directory for db1
|
||||
# - an empty directory for db2 (after we remove db2/db.opt)
|
||||
|
||||
|
||||
CREATE DATABASE db1;
|
||||
CREATE DATABASE db2;
|
||||
|
||||
# Add some tables to db1
|
||||
CREATE TABLE db1.t1 (a INT) ENGINE=MyISAM;
|
||||
CREATE TABLE db1.t2 (a INT) ENGINE=InnoDB;
|
||||
|
||||
--echo # Create base backup
|
||||
--disable_result_log
|
||||
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir
|
||||
--enable_result_log
|
||||
|
||||
DROP DATABASE db1;
|
||||
|
||||
--echo # Create incremental backup
|
||||
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir
|
||||
|
||||
--echo # Remove incremental_dir/db2/db.opt file to make incremental_dir/db2/ empty
|
||||
--remove_file $incremental_dir/db2/db.opt
|
||||
|
||||
|
||||
--echo # Prepare base backup, apply incremental one
|
||||
--disable_result_log
|
||||
--exec $XTRABACKUP --prepare --target-dir=$basedir
|
||||
--exec $XTRABACKUP --prepare --target-dir=$basedir --incremental-dir=$incremental_dir
|
||||
--enable_result_log
|
||||
|
||||
--let $targetdir=$basedir
|
||||
--source include/restart_and_restore.inc
|
||||
--enable_result_log
|
||||
|
||||
--echo # Expect no 'db1' in the output, because it was really dropped.
|
||||
--echo # Expect 'db2' in the output, because it was not dropped!
|
||||
--echo # (its incremental directory was emptied only)
|
||||
|
||||
SHOW DATABASES LIKE 'db%';
|
||||
DROP DATABASE db2;
|
||||
|
||||
--rmdir $basedir
|
||||
--rmdir $incremental_dir
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.3 tests
|
||||
--echo #
|
|
@ -521,3 +521,10 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'show status like \'server_audit_
|
|||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'show variables like \'server_audit%\'',0
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,plugin,
|
||||
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'uninstall plugin server_audit',0
|
||||
#
|
||||
# MDEV-27631 Assertion `global_status_var.global_memory_used == 0' failed in mysqld_exit
|
||||
#
|
||||
install plugin server_audit soname 'server_audit';
|
||||
uninstall plugin server_audit;
|
||||
Warnings:
|
||||
Warning 1620 Plugin is busy and will be uninstalled on shutdown
|
||||
|
|
|
@ -235,3 +235,8 @@ uninstall plugin server_audit;
|
|||
cat_file $MYSQLD_DATADIR/server_audit.log;
|
||||
remove_file $MYSQLD_DATADIR/server_audit.log;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-27631 Assertion `global_status_var.global_memory_used == 0' failed in mysqld_exit
|
||||
--echo #
|
||||
install plugin server_audit soname 'server_audit';
|
||||
uninstall plugin server_audit;
|
||||
|
|
75
mysql-test/suite/rpl/include/create_or_drop_sync_func.inc
Normal file
|
@ -0,0 +1,75 @@
|
|||
# Creates or drops a stored function as part of a debug-sync based
|
||||
# synchronization mechanism between replication servers.
|
||||
#
|
||||
# Parameters:
|
||||
# $create_or_drop= [create]
|
||||
# $server_master = [master]
|
||||
# $server_slave = [slave]
|
||||
if (!$create_or_drop)
|
||||
{
|
||||
--let $create_or_drop=create
|
||||
}
|
||||
|
||||
if (`select strcmp('$create_or_drop', 'create') = 0`)
|
||||
{
|
||||
if (!$server_master)
|
||||
{
|
||||
--let $server_master=master
|
||||
}
|
||||
if (!$server_slave)
|
||||
{
|
||||
--let $server_slave=slave
|
||||
}
|
||||
|
||||
--connection $server_master
|
||||
# Use a stored function to inject a debug_sync into the appropriate THD.
|
||||
# The function does nothing on the master, and on the slave it injects the
|
||||
# desired debug_sync action(s).
|
||||
SET sql_log_bin=0;
|
||||
--delimiter ||
|
||||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
|
||||
RETURNS INT DETERMINISTIC
|
||||
BEGIN
|
||||
RETURN x;
|
||||
END
|
||||
||
|
||||
--delimiter ;
|
||||
SET sql_log_bin=1;
|
||||
|
||||
--connection $server_slave
|
||||
|
||||
SET sql_log_bin=0;
|
||||
--delimiter ||
|
||||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
|
||||
RETURNS INT DETERMINISTIC
|
||||
BEGIN
|
||||
IF d1 != '' THEN
|
||||
SET debug_sync = d1;
|
||||
END IF;
|
||||
IF d2 != '' THEN
|
||||
SET debug_sync = d2;
|
||||
END IF;
|
||||
RETURN x;
|
||||
END
|
||||
||
|
||||
--delimiter ;
|
||||
SET sql_log_bin=1;
|
||||
}
|
||||
|
||||
if (`select strcmp('$create_or_drop', 'drop') = 0`)
|
||||
{
|
||||
if (!$server_slave)
|
||||
{
|
||||
--let $server_slave=slave
|
||||
}
|
||||
if (!$server_master)
|
||||
{
|
||||
--let $server_master=master
|
||||
}
|
||||
--connection $server_slave
|
||||
SET DEBUG_SYNC='RESET';
|
||||
|
||||
--connection $server_master
|
||||
SET DEBUG_SYNC='RESET';
|
||||
DROP FUNCTION foo;
|
||||
}
|
|
@ -8,6 +8,8 @@ connection master;
|
|||
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = innodb;
|
||||
INSERT INTO t1 VALUES(100);
|
||||
connection slave;
|
||||
call mtr.add_suppression("Deadlock found when trying to get lock");
|
||||
call mtr.add_suppression("Commit failed due to failure of an earlier commit");
|
||||
include/stop_slave.inc
|
||||
SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads;
|
||||
SET @old_parallel_mode = @@GLOBAL.slave_parallel_mode;
|
||||
|
@ -31,6 +33,167 @@ connection backup_slave;
|
|||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/diff_tables.inc [master:t1,slave:t1]
|
||||
# MDEV-30423: deadlock XA COMMIT vs BACKUP
|
||||
#
|
||||
# Normal XA COMMIT
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (102);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (101);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (102);
|
||||
connection master;
|
||||
XA COMMIT '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/stop_slave.inc
|
||||
#
|
||||
# Normal XA ROLLBACK
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
Warnings:
|
||||
Note 1255 Slave already has been stopped
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (104);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (103);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (104);
|
||||
connection master;
|
||||
XA ROLLBACK '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/stop_slave.inc
|
||||
#
|
||||
# Errored out XA COMMIT
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
Warnings:
|
||||
Note 1255 Slave already has been stopped
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (106);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (105);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (106);
|
||||
connection master;
|
||||
XA COMMIT '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.innodb_lock_wait_timeout = @sav_innodb_lock_wait_timeout;
|
||||
SET @@global.slave_transaction_retries= @sav_slave_transaction_retries;
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/sync_with_master_gtid.inc
|
||||
#
|
||||
# Errored out XA ROLLBACK
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (108);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (107);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (108);
|
||||
connection master;
|
||||
XA ROLLBACK '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.innodb_lock_wait_timeout = @sav_innodb_lock_wait_timeout;
|
||||
SET @@global.slave_transaction_retries= @sav_slave_transaction_retries;
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/sync_with_master_gtid.inc
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.slave_parallel_threads= @old_parallel_threads;
|
||||
|
|
207
mysql-test/suite/rpl/r/parallel_backup_lsu_off.result
Normal file
|
@ -0,0 +1,207 @@
|
|||
# Specialized --log-slave-updates = 0 version of parallel_backup test.
|
||||
# MDEV-21953: deadlock between BACKUP STAGE BLOCK_COMMIT and parallel
|
||||
# MDEV-30423: deadlock XA COMMIT vs BACKUP
|
||||
include/master-slave.inc
|
||||
[connection master]
|
||||
#
|
||||
# MDEV-21953: deadlock between BACKUP STAGE BLOCK_COMMIT and parallel
|
||||
# replication
|
||||
#
|
||||
connection master;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = innodb;
|
||||
INSERT INTO t1 VALUES(100);
|
||||
connection slave;
|
||||
call mtr.add_suppression("Deadlock found when trying to get lock");
|
||||
call mtr.add_suppression("Commit failed due to failure of an earlier commit");
|
||||
include/stop_slave.inc
|
||||
SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads;
|
||||
SET @old_parallel_mode = @@GLOBAL.slave_parallel_mode;
|
||||
SET @@global.slave_parallel_threads= 2;
|
||||
SET @@global.slave_parallel_mode = 'optimistic';
|
||||
connection master;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
INSERT INTO t1 VALUES (2);
|
||||
connect aux_slave,127.0.0.1,root,,test,$SLAVE_MYPORT,;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
connect backup_slave,127.0.0.1,root,,test,$SLAVE_MYPORT,;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/diff_tables.inc [master:t1,slave:t1]
|
||||
# MDEV-30423: deadlock XA COMMIT vs BACKUP
|
||||
#
|
||||
# Normal XA COMMIT
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (102);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (101);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (102);
|
||||
connection master;
|
||||
XA COMMIT '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/stop_slave.inc
|
||||
#
|
||||
# Normal XA ROLLBACK
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
Warnings:
|
||||
Note 1255 Slave already has been stopped
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (104);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (103);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (104);
|
||||
connection master;
|
||||
XA ROLLBACK '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/stop_slave.inc
|
||||
#
|
||||
# Errored out XA COMMIT
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
Warnings:
|
||||
Note 1255 Slave already has been stopped
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (106);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (105);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (106);
|
||||
connection master;
|
||||
XA COMMIT '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.innodb_lock_wait_timeout = @sav_innodb_lock_wait_timeout;
|
||||
SET @@global.slave_transaction_retries= @sav_slave_transaction_retries;
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/sync_with_master_gtid.inc
|
||||
#
|
||||
# Errored out XA ROLLBACK
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (108);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (107);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (108);
|
||||
connection master;
|
||||
XA ROLLBACK '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.innodb_lock_wait_timeout = @sav_innodb_lock_wait_timeout;
|
||||
SET @@global.slave_transaction_retries= @sav_slave_transaction_retries;
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/sync_with_master_gtid.inc
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.slave_parallel_threads= @old_parallel_threads;
|
||||
SET @@global.slave_parallel_mode = @old_parallel_mode;
|
||||
include/start_slave.inc
|
||||
connection server_1;
|
||||
DROP TABLE t1;
|
||||
include/rpl_end.inc
|
207
mysql-test/suite/rpl/r/parallel_backup_slave_binlog_off.result
Normal file
|
@ -0,0 +1,207 @@
|
|||
# Specialized --skip-log-bin slave version of parallel_backup test.
|
||||
# MDEV-21953: deadlock between BACKUP STAGE BLOCK_COMMIT and parallel
|
||||
# MDEV-30423: deadlock XA COMMIT vs BACKUP
|
||||
include/master-slave.inc
|
||||
[connection master]
|
||||
#
|
||||
# MDEV-21953: deadlock between BACKUP STAGE BLOCK_COMMIT and parallel
|
||||
# replication
|
||||
#
|
||||
connection master;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = innodb;
|
||||
INSERT INTO t1 VALUES(100);
|
||||
connection slave;
|
||||
call mtr.add_suppression("Deadlock found when trying to get lock");
|
||||
call mtr.add_suppression("Commit failed due to failure of an earlier commit");
|
||||
include/stop_slave.inc
|
||||
SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads;
|
||||
SET @old_parallel_mode = @@GLOBAL.slave_parallel_mode;
|
||||
SET @@global.slave_parallel_threads= 2;
|
||||
SET @@global.slave_parallel_mode = 'optimistic';
|
||||
connection master;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
INSERT INTO t1 VALUES (2);
|
||||
connect aux_slave,127.0.0.1,root,,test,$SLAVE_MYPORT,;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
connect backup_slave,127.0.0.1,root,,test,$SLAVE_MYPORT,;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/diff_tables.inc [master:t1,slave:t1]
|
||||
# MDEV-30423: deadlock XA COMMIT vs BACKUP
|
||||
#
|
||||
# Normal XA COMMIT
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (102);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (101);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (102);
|
||||
connection master;
|
||||
XA COMMIT '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/stop_slave.inc
|
||||
#
|
||||
# Normal XA ROLLBACK
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
Warnings:
|
||||
Note 1255 Slave already has been stopped
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (104);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (103);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (104);
|
||||
connection master;
|
||||
XA ROLLBACK '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/stop_slave.inc
|
||||
#
|
||||
# Errored out XA COMMIT
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
Warnings:
|
||||
Note 1255 Slave already has been stopped
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (106);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (105);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (106);
|
||||
connection master;
|
||||
XA COMMIT '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.innodb_lock_wait_timeout = @sav_innodb_lock_wait_timeout;
|
||||
SET @@global.slave_transaction_retries= @sav_slave_transaction_retries;
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/sync_with_master_gtid.inc
|
||||
#
|
||||
# Errored out XA ROLLBACK
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
connection master;
|
||||
connection aux_slave;
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES (108);
|
||||
connection master;
|
||||
XA START '1';
|
||||
INSERT INTO t1 VALUES (107);
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
connection master1;
|
||||
INSERT INTO t1 VALUES (108);
|
||||
connection master;
|
||||
XA ROLLBACK '1';
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
include/start_slave.inc
|
||||
connection aux_slave;
|
||||
# Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
formatID gtrid_length bqual_length data
|
||||
1 1 0 1
|
||||
connection backup_slave;
|
||||
BACKUP STAGE START;
|
||||
BACKUP STAGE BLOCK_COMMIT;
|
||||
connection aux_slave;
|
||||
ROLLBACK;
|
||||
connection backup_slave;
|
||||
BACKUP STAGE END;
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.innodb_lock_wait_timeout = @sav_innodb_lock_wait_timeout;
|
||||
SET @@global.slave_transaction_retries= @sav_slave_transaction_retries;
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/sync_with_master_gtid.inc
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
SET @@global.slave_parallel_threads= @old_parallel_threads;
|
||||
SET @@global.slave_parallel_mode = @old_parallel_mode;
|
||||
include/start_slave.inc
|
||||
connection server_1;
|
||||
DROP TABLE t1;
|
||||
include/rpl_end.inc
|
60
mysql-test/suite/rpl/r/rpl_delayed_parallel_slave_sbm.result
Normal file
|
@ -0,0 +1,60 @@
|
|||
include/master-slave.inc
|
||||
[connection master]
|
||||
connection slave;
|
||||
include/stop_slave.inc
|
||||
change master to master_delay=3, master_use_gtid=Slave_Pos;
|
||||
set @@GLOBAL.slave_parallel_threads=2;
|
||||
include/start_slave.inc
|
||||
connection master;
|
||||
create table t1 (a int);
|
||||
include/sync_slave_sql_with_master.inc
|
||||
#
|
||||
# Pt 1) Ensure SBM is updated immediately upon arrival of the next event
|
||||
# Lock t1 on slave so the first received transaction does not complete/commit
|
||||
connection slave;
|
||||
LOCK TABLES t1 WRITE;
|
||||
connection master;
|
||||
# Sleep 2 to allow a buffer between events for SBM check
|
||||
insert into t1 values (0);
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
# Waiting for transaction to arrive on slave and begin SQL Delay..
|
||||
# Validating SBM is updated on event arrival..
|
||||
# ..done
|
||||
connection slave;
|
||||
UNLOCK TABLES;
|
||||
include/sync_with_master_gtid.inc
|
||||
#
|
||||
# Pt 2) If the SQL thread has not entered an idle state, ensure
|
||||
# following events do not update SBM
|
||||
# Stop slave IO thread so it receives both events together on restart
|
||||
connection slave;
|
||||
include/stop_slave_io.inc
|
||||
connection master;
|
||||
# Sleep 2 to allow a buffer between events for SBM check
|
||||
insert into t1 values (1);
|
||||
# Sleep 3 to create gap between events
|
||||
insert into t1 values (2);
|
||||
connection slave;
|
||||
LOCK TABLES t1 WRITE;
|
||||
START SLAVE IO_THREAD;
|
||||
# Wait for first transaction to complete SQL delay and begin execution..
|
||||
# Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
|
||||
# ..and that SBM wasn't calculated using prior committed transactions
|
||||
# ..done
|
||||
connection slave;
|
||||
UNLOCK TABLES;
|
||||
#
|
||||
# Cleanup
|
||||
# Reset master_delay
|
||||
include/stop_slave.inc
|
||||
CHANGE MASTER TO master_delay=0;
|
||||
set @@GLOBAL.slave_parallel_threads=4;
|
||||
include/start_slave.inc
|
||||
connection master;
|
||||
DROP TABLE t1;
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/rpl_end.inc
|
||||
# End of rpl_delayed_parallel_slave_sbm.test
|
76
mysql-test/suite/rpl/r/rpl_parallel_analyze.result
Normal file
|
@ -0,0 +1,76 @@
|
|||
include/master-slave.inc
|
||||
[connection master]
|
||||
# Initialize
|
||||
connection slave;
|
||||
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
|
||||
# Setup data
|
||||
connection master;
|
||||
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
|
||||
CREATE TABLE ta (a int);
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
connection master;
|
||||
SET sql_log_bin=0;
|
||||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
|
||||
RETURNS INT DETERMINISTIC
|
||||
BEGIN
|
||||
RETURN x;
|
||||
END
|
||||
||
|
||||
SET sql_log_bin=1;
|
||||
connection slave;
|
||||
SET sql_log_bin=0;
|
||||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
|
||||
RETURNS INT DETERMINISTIC
|
||||
BEGIN
|
||||
IF d1 != '' THEN
|
||||
SET debug_sync = d1;
|
||||
END IF;
|
||||
IF d2 != '' THEN
|
||||
SET debug_sync = d2;
|
||||
END IF;
|
||||
RETURN x;
|
||||
END
|
||||
||
|
||||
SET sql_log_bin=1;
|
||||
include/stop_slave.inc
|
||||
SET @old_parallel_threads =@@GLOBAL.slave_parallel_threads;
|
||||
SET @old_parallel_mode =@@GLOBAL.slave_parallel_mode;
|
||||
SET @old_gtid_strict_mode =@@GLOBAL.gtid_strict_mode;
|
||||
SET GLOBAL slave_parallel_threads=10;
|
||||
SET GLOBAL slave_parallel_mode=conservative;
|
||||
SET GLOBAL gtid_strict_mode=ON;
|
||||
include/start_slave.inc
|
||||
connection master;
|
||||
SET @old_format= @@SESSION.binlog_format;
|
||||
SET binlog_format=statement;
|
||||
INSERT INTO t1 VALUES (foo(1, 'rpl_parallel_after_mark_start_commit WAIT_FOR sig_go', ''));
|
||||
ANALYZE TABLE ta;
|
||||
Table Op Msg_type Msg_text
|
||||
test.ta analyze status Engine-independent statistics collected
|
||||
test.ta analyze status Table is already up to date
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
SELECT info FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit";
|
||||
info
|
||||
ANALYZE TABLE ta
|
||||
set @@debug_sync="now signal sig_go";
|
||||
include/sync_with_master_gtid.inc
|
||||
# Cleanup
|
||||
connection master;
|
||||
DROP TABLE t1,ta;
|
||||
connection slave;
|
||||
SET DEBUG_SYNC='RESET';
|
||||
connection master;
|
||||
SET DEBUG_SYNC='RESET';
|
||||
DROP FUNCTION foo;
|
||||
include/save_master_gtid.inc
|
||||
connection slave;
|
||||
include/sync_with_master_gtid.inc
|
||||
include/stop_slave.inc
|
||||
SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads;
|
||||
SET @@GLOBAL.slave_parallel_mode =@old_parallel_mode;
|
||||
SET @@GLOBAL.gtid_strict_mode =@old_gtid_strict_mode;
|
||||
include/start_slave.inc
|
||||
include/rpl_end.inc
|
|
@ -15,6 +15,9 @@ CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE = innodb;
|
|||
INSERT INTO t1 VALUES(100);
|
||||
|
||||
--sync_slave_with_master
|
||||
call mtr.add_suppression("Deadlock found when trying to get lock");
|
||||
call mtr.add_suppression("Commit failed due to failure of an earlier commit");
|
||||
|
||||
--source include/stop_slave.inc
|
||||
SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads;
|
||||
SET @old_parallel_mode = @@GLOBAL.slave_parallel_mode;
|
||||
|
@ -64,6 +67,32 @@ BACKUP STAGE END;
|
|||
--let $diff_tables= master:t1,slave:t1
|
||||
--source include/diff_tables.inc
|
||||
|
||||
#
|
||||
--echo # MDEV-30423: deadlock XA COMMIT vs BACKUP
|
||||
# Prove XA "COMPLETE" 'xid' does not deadlock similarly to the normal trx case.
|
||||
# The slave binlog group commit leader is blocked by a local trx like in
|
||||
# the above normal trx case.
|
||||
# [Notice a reuse of t1,aux_conn from above.]
|
||||
#
|
||||
--let $complete = COMMIT
|
||||
--source parallel_backup_xa.inc
|
||||
--let $complete = ROLLBACK
|
||||
--source parallel_backup_xa.inc
|
||||
|
||||
--let $slave_ooo_error = 1
|
||||
--let $complete = COMMIT
|
||||
--source parallel_backup_xa.inc
|
||||
--connection slave
|
||||
--source include/start_slave.inc
|
||||
--source include/sync_with_master_gtid.inc
|
||||
|
||||
--let $slave_ooo_error = 1
|
||||
--let $complete = ROLLBACK
|
||||
--source parallel_backup_xa.inc
|
||||
--connection slave
|
||||
--source include/start_slave.inc
|
||||
--source include/sync_with_master_gtid.inc
|
||||
|
||||
|
||||
# Clean up.
|
||||
--connection slave
|
||||
|
|
2
mysql-test/suite/rpl/t/parallel_backup_lsu_off-slave.opt
Normal file
|
@ -0,0 +1,2 @@
|
|||
--log-slave-updates=0
|
||||
|
7
mysql-test/suite/rpl/t/parallel_backup_lsu_off.test
Normal file
|
@ -0,0 +1,7 @@
|
|||
#
|
||||
--echo # Specialized --log-slave-updates = 0 version of parallel_backup test.
|
||||
#
|
||||
--echo # MDEV-21953: deadlock between BACKUP STAGE BLOCK_COMMIT and parallel
|
||||
--echo # MDEV-30423: deadlock XA COMMIT vs BACKUP
|
||||
--let $rpl_skip_reset_master_and_slave = 1
|
||||
--source parallel_backup.test
|
|
@ -0,0 +1 @@
|
|||
--skip-log-bin
|
|
@ -0,0 +1,7 @@
|
|||
#
|
||||
--echo # Specialized --skip-log-bin slave version of parallel_backup test.
|
||||
#
|
||||
--echo # MDEV-21953: deadlock between BACKUP STAGE BLOCK_COMMIT and parallel
|
||||
--echo # MDEV-30423: deadlock XA COMMIT vs BACKUP
|
||||
--let $rpl_server_skip_log_bin= 1
|
||||
--source parallel_backup.test
|
79
mysql-test/suite/rpl/t/parallel_backup_xa.inc
Normal file
|
@ -0,0 +1,79 @@
|
|||
# Invoked from parallel_backup.test
|
||||
# Parameters:
|
||||
# $complete = COMMIT or ROLLBACK
|
||||
# $slave_ooo_error = 1 means slave group commit did not succeed
|
||||
#
|
||||
--let $kind = Normal
|
||||
if ($slave_ooo_error)
|
||||
{
|
||||
--let $kind = Errored out
|
||||
}
|
||||
--echo #
|
||||
--echo # $kind XA $complete
|
||||
|
||||
--connection slave
|
||||
--source include/stop_slave.inc
|
||||
|
||||
--connection master
|
||||
# val_0 is the first value to insert on master in prepared xa
|
||||
# val_1 is the next one to insert which is the value to block on slave
|
||||
--let $val_0 = `SELECT max(a)+1 FROM t1`
|
||||
--let $val_1 = $val_0
|
||||
--inc $val_1
|
||||
|
||||
--connection aux_slave
|
||||
BEGIN;
|
||||
--eval INSERT INTO t1 VALUES ($val_1)
|
||||
|
||||
--connection master
|
||||
XA START '1';
|
||||
--eval INSERT INTO t1 VALUES ($val_0)
|
||||
XA END '1';
|
||||
XA PREPARE '1';
|
||||
--connection master1
|
||||
--eval INSERT INTO t1 VALUES ($val_1)
|
||||
--connection master
|
||||
--eval XA $complete '1'
|
||||
--source include/save_master_gtid.inc
|
||||
|
||||
--connection slave
|
||||
if ($slave_ooo_error)
|
||||
{
|
||||
SET @sav_innodb_lock_wait_timeout = @@global.innodb_lock_wait_timeout;
|
||||
SET @sav_slave_transaction_retries = @@global.slave_transaction_retries;
|
||||
SET @@global.innodb_lock_wait_timeout =1;
|
||||
SET @@global.slave_transaction_retries=0;
|
||||
}
|
||||
--source include/start_slave.inc
|
||||
--connection aux_slave
|
||||
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit"
|
||||
--source include/wait_condition.inc
|
||||
--echo # Xid '1' must be in the output:
|
||||
XA RECOVER;
|
||||
--connection backup_slave
|
||||
BACKUP STAGE START;
|
||||
--send BACKUP STAGE BLOCK_COMMIT
|
||||
--connection aux_slave
|
||||
--sleep 1
|
||||
if ($slave_ooo_error)
|
||||
{
|
||||
--let $wait_condition= SELECT COUNT(*) = 0 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit"
|
||||
--source include/wait_condition.inc
|
||||
}
|
||||
ROLLBACK;
|
||||
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for backup lock"
|
||||
--source include/wait_condition.inc
|
||||
--connection backup_slave
|
||||
--reap
|
||||
BACKUP STAGE END;
|
||||
--connection slave
|
||||
if (!$slave_ooo_error)
|
||||
{
|
||||
--source include/sync_with_master_gtid.inc
|
||||
}
|
||||
--source include/stop_slave.inc
|
||||
if ($slave_ooo_error)
|
||||
{
|
||||
SET @@global.innodb_lock_wait_timeout = @sav_innodb_lock_wait_timeout;
|
||||
SET @@global.slave_transaction_retries= @sav_slave_transaction_retries;
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
--slave-parallel-threads=4
|
133
mysql-test/suite/rpl/t/rpl_delayed_parallel_slave_sbm.test
Normal file
|
@ -0,0 +1,133 @@
|
|||
#
|
||||
# This test ensures that after a delayed parallel slave has idled, i.e.
|
||||
# executed everything in its relay log, the next event group that the SQL
|
||||
# thread reads from the relay log will immediately be used in the
|
||||
# Seconds_Behind_Master. In particular, it ensures that the calculation for
|
||||
# Seconds_Behind_Master is based on the timestamp of the new transaction,
|
||||
# rather than the last committed transaction.
|
||||
#
|
||||
# References:
|
||||
# MDEV-29639: Seconds_Behind_Master is incorrect for Delayed, Parallel
|
||||
# Replicas
|
||||
#
|
||||
|
||||
--source include/master-slave.inc
|
||||
|
||||
--connection slave
|
||||
--source include/stop_slave.inc
|
||||
--let $master_delay= 3
|
||||
--eval change master to master_delay=$master_delay, master_use_gtid=Slave_Pos
|
||||
--let $old_slave_threads= `SELECT @@GLOBAL.slave_parallel_threads`
|
||||
set @@GLOBAL.slave_parallel_threads=2;
|
||||
--source include/start_slave.inc
|
||||
|
||||
--connection master
|
||||
create table t1 (a int);
|
||||
--source include/sync_slave_sql_with_master.inc
|
||||
|
||||
--echo #
|
||||
--echo # Pt 1) Ensure SBM is updated immediately upon arrival of the next event
|
||||
|
||||
--echo # Lock t1 on slave so the first received transaction does not complete/commit
|
||||
--connection slave
|
||||
LOCK TABLES t1 WRITE;
|
||||
|
||||
--connection master
|
||||
--echo # Sleep 2 to allow a buffer between events for SBM check
|
||||
sleep 2;
|
||||
|
||||
--let $ts_trx_before_ins= `SELECT UNIX_TIMESTAMP()`
|
||||
--let $insert_ctr= 0
|
||||
--eval insert into t1 values ($insert_ctr)
|
||||
--inc $insert_ctr
|
||||
--source include/save_master_gtid.inc
|
||||
|
||||
--connection slave
|
||||
|
||||
--echo # Waiting for transaction to arrive on slave and begin SQL Delay..
|
||||
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting until MASTER_DELAY seconds after master executed event';
|
||||
--source include/wait_condition.inc
|
||||
|
||||
--echo # Validating SBM is updated on event arrival..
|
||||
--let $sbm_trx1_arrive= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
|
||||
--let $seconds_since_idling= `SELECT UNIX_TIMESTAMP() - $ts_trx_before_ins`
|
||||
if (`SELECT $sbm_trx1_arrive > ($seconds_since_idling + 1)`)
|
||||
{
|
||||
--echo # SBM was $sbm_trx1_arrive yet shouldn't have been larger than $seconds_since_idling + 1 (for possible negative clock_diff_with_master)
|
||||
--die Seconds_Behind_Master should reset after idling
|
||||
}
|
||||
--echo # ..done
|
||||
|
||||
--connection slave
|
||||
UNLOCK TABLES;
|
||||
--source include/sync_with_master_gtid.inc
|
||||
|
||||
--echo #
|
||||
--echo # Pt 2) If the SQL thread has not entered an idle state, ensure
|
||||
--echo # following events do not update SBM
|
||||
|
||||
--echo # Stop slave IO thread so it receives both events together on restart
|
||||
--connection slave
|
||||
--source include/stop_slave_io.inc
|
||||
|
||||
--connection master
|
||||
|
||||
--echo # Sleep 2 to allow a buffer between events for SBM check
|
||||
sleep 2;
|
||||
--let $ts_trxpt2_before_ins= `SELECT UNIX_TIMESTAMP()`
|
||||
--eval insert into t1 values ($insert_ctr)
|
||||
--inc $insert_ctr
|
||||
--echo # Sleep 3 to create gap between events
|
||||
sleep 3;
|
||||
--eval insert into t1 values ($insert_ctr)
|
||||
--inc $insert_ctr
|
||||
--let $ts_trx_after_ins= `SELECT UNIX_TIMESTAMP()`
|
||||
|
||||
--connection slave
|
||||
LOCK TABLES t1 WRITE;
|
||||
|
||||
START SLAVE IO_THREAD;
|
||||
|
||||
--echo # Wait for first transaction to complete SQL delay and begin execution..
|
||||
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting for table metadata lock%' AND command LIKE 'Slave_Worker';
|
||||
--source include/wait_condition.inc
|
||||
|
||||
--echo # Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
|
||||
--let $sbm_after_trx_no_idle= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
|
||||
--let $timestamp_trxpt2_arrive= `SELECT UNIX_TIMESTAMP()`
|
||||
if (`SELECT $sbm_after_trx_no_idle < $timestamp_trxpt2_arrive - $ts_trx_after_ins`)
|
||||
{
|
||||
--let $cmpv= `SELECT $timestamp_trxpt2_arrive - $ts_trx_after_ins`
|
||||
--echo # SBM $sbm_after_trx_no_idle was more recent than time since last transaction ($cmpv seconds)
|
||||
--die Seconds_Behind_Master should not have used second transaction timestamp
|
||||
}
|
||||
--let $seconds_since_idling= `SELECT ($timestamp_trxpt2_arrive - $ts_trxpt2_before_ins)`
|
||||
--echo # ..and that SBM wasn't calculated using prior committed transactions
|
||||
if (`SELECT $sbm_after_trx_no_idle > ($seconds_since_idling + 1)`)
|
||||
{
|
||||
--echo # SBM was $sbm_after_trx_no_idle yet shouldn't have been larger than $seconds_since_idling + 1 (for possible negative clock_diff_with_master)
|
||||
--die Seconds_Behind_Master calculation should not have used prior committed transaction
|
||||
}
|
||||
--echo # ..done
|
||||
|
||||
--connection slave
|
||||
UNLOCK TABLES;
|
||||
|
||||
--echo #
|
||||
--echo # Cleanup
|
||||
|
||||
--echo # Reset master_delay
|
||||
--source include/stop_slave.inc
|
||||
--eval CHANGE MASTER TO master_delay=0
|
||||
--eval set @@GLOBAL.slave_parallel_threads=$old_slave_threads
|
||||
--source include/start_slave.inc
|
||||
|
||||
--connection master
|
||||
DROP TABLE t1;
|
||||
--source include/save_master_gtid.inc
|
||||
|
||||
--connection slave
|
||||
--source include/sync_with_master_gtid.inc
|
||||
|
||||
--source include/rpl_end.inc
|
||||
--echo # End of rpl_delayed_parallel_slave_sbm.test
|
84
mysql-test/suite/rpl/t/rpl_parallel_analyze.test
Normal file
|
@ -0,0 +1,84 @@
|
|||
# The test file is created to prove fixes to
|
||||
# MDEV-30323 Some DDLs like ANALYZE can complete on parallel slave out of order
|
||||
# Debug-sync tests aiming at parallel replication of ADMIN commands
|
||||
# are welcome here.
|
||||
|
||||
--source include/have_innodb.inc
|
||||
--source include/have_debug_sync.inc
|
||||
--source include/master-slave.inc
|
||||
|
||||
--echo # Initialize
|
||||
--connection slave
|
||||
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
|
||||
|
||||
--echo # Setup data
|
||||
--connection master
|
||||
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
|
||||
CREATE TABLE ta (a int);
|
||||
--let $pre_load_gtid=`SELECT @@last_gtid`
|
||||
--source include/save_master_gtid.inc
|
||||
|
||||
--connection slave
|
||||
--source include/sync_with_master_gtid.inc
|
||||
|
||||
--source suite/rpl/include/create_or_drop_sync_func.inc
|
||||
|
||||
# configure MDEV-30323 slave
|
||||
--source include/stop_slave.inc
|
||||
SET @old_parallel_threads =@@GLOBAL.slave_parallel_threads;
|
||||
SET @old_parallel_mode =@@GLOBAL.slave_parallel_mode;
|
||||
SET @old_gtid_strict_mode =@@GLOBAL.gtid_strict_mode;
|
||||
SET GLOBAL slave_parallel_threads=10;
|
||||
SET GLOBAL slave_parallel_mode=conservative;
|
||||
SET GLOBAL gtid_strict_mode=ON;
|
||||
--source include/start_slave.inc
|
||||
|
||||
# MDEV-30323 setup needs two groups of events, the first of which is a DML
|
||||
# and ANALYZE is the 2nd.
|
||||
# The latter is made to race in slave execution over the DML thanks
|
||||
# to a DML latency simulation.
|
||||
# In the fixed case the race-over should not be a problem: ultimately
|
||||
# ANALYZE must wait for its turn to update slave@@global.gtid_binlog_pos.
|
||||
# Otherwise the reported OOO error must be issued.
|
||||
|
||||
--connection master
|
||||
SET @old_format= @@SESSION.binlog_format;
|
||||
SET binlog_format=statement;
|
||||
INSERT INTO t1 VALUES (foo(1, 'rpl_parallel_after_mark_start_commit WAIT_FOR sig_go', ''));
|
||||
|
||||
ANALYZE TABLE ta;
|
||||
--source include/save_master_gtid.inc
|
||||
|
||||
--connection slave
|
||||
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit"
|
||||
--source include/wait_condition.inc
|
||||
|
||||
SELECT info FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit";
|
||||
if (`select strcmp(@@global.gtid_binlog_pos, '$pre_load_gtid') <> 0 or strcmp(@@global.gtid_slave_pos, '$pre_load_gtid') <> 0`)
|
||||
{
|
||||
--let $bs=`SELECT @@global.gtid_binlog_pos`
|
||||
--let $es=`SELECT @@global.gtid_slave_pos`
|
||||
--echo Mismatch between the expected state $pre_load_gtid and the actual binlog state @@global.gtid_binlog_pos = $bs and/or slave execution state @@global.gtid_slave_pos = $es.
|
||||
--die
|
||||
}
|
||||
|
||||
set @@debug_sync="now signal sig_go";
|
||||
--source include/sync_with_master_gtid.inc
|
||||
|
||||
--echo # Cleanup
|
||||
--connection master
|
||||
DROP TABLE t1,ta;
|
||||
--let $create_or_drop=drop
|
||||
--source suite/rpl/include/create_or_drop_sync_func.inc
|
||||
|
||||
--source include/save_master_gtid.inc
|
||||
|
||||
--connection slave
|
||||
--source include/sync_with_master_gtid.inc
|
||||
--source include/stop_slave.inc
|
||||
SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads;
|
||||
SET @@GLOBAL.slave_parallel_mode =@old_parallel_mode;
|
||||
SET @@GLOBAL.gtid_strict_mode =@old_gtid_strict_mode;
|
||||
--source include/start_slave.inc
|
||||
|
||||
--source include/rpl_end.inc
|
|
@ -64,9 +64,9 @@ public:
|
|||
{
|
||||
return memcmp(a + m_memory_pos, b + m_memory_pos, m_length);
|
||||
}
|
||||
void hash_record(const uchar *ptr, ulong *nr, ulong *nr2) const
|
||||
void hash_record(const uchar *ptr, Hasher *hasher) const
|
||||
{
|
||||
my_charset_bin.hash_sort(ptr + m_record_pos, m_length, nr, nr2);
|
||||
hasher->add(&my_charset_bin, ptr + m_record_pos, m_length);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -119,13 +119,13 @@ public:
|
|||
|
||||
put values into the same partition.
|
||||
*/
|
||||
static void hash_record(const uchar *ptr, ulong *nr, ulong *nr2)
|
||||
static void hash_record(const uchar *ptr, Hasher *hasher)
|
||||
{
|
||||
segment(0).hash_record(ptr, nr, nr2);
|
||||
segment(1).hash_record(ptr, nr, nr2);
|
||||
segment(2).hash_record(ptr, nr, nr2);
|
||||
segment(3).hash_record(ptr, nr, nr2);
|
||||
segment(4).hash_record(ptr, nr, nr2);
|
||||
segment(0).hash_record(ptr, hasher);
|
||||
segment(1).hash_record(ptr, hasher);
|
||||
segment(2).hash_record(ptr, hasher);
|
||||
segment(3).hash_record(ptr, hasher);
|
||||
segment(4).hash_record(ptr, hasher);
|
||||
}
|
||||
|
||||
// Compare two in-memory values
|
||||
|
|
|
@ -235,6 +235,15 @@ FOREACH(se aria partition perfschema sql_sequence wsrep)
|
|||
ENDIF()
|
||||
ENDFOREACH()
|
||||
|
||||
IF(VISIBILITY_HIDDEN_FLAG AND TARGET partition AND WITH_UBSAN)
|
||||
# the spider plugin needs some partition symbols from inside mysqld
|
||||
# when built with ubsan, in which case we need to remove
|
||||
# -fvisibility=hidden from partition
|
||||
GET_TARGET_PROPERTY(f partition COMPILE_FLAGS)
|
||||
STRING(REPLACE "${VISIBILITY_HIDDEN_FLAG}" "" f ${f})
|
||||
SET_TARGET_PROPERTIES(partition PROPERTIES COMPILE_FLAGS "${f}")
|
||||
ENDIF()
|
||||
|
||||
IF(WIN32)
|
||||
SET(MYSQLD_SOURCE main.cc message.rc)
|
||||
ELSE()
|
||||
|
|
|
@ -28,7 +28,7 @@ int writefile(const char *path, const char *db, const char *table,
|
|||
inline void deletefrm(const char *path)
|
||||
{
|
||||
char frm_name[FN_REFLEN];
|
||||
strxmov(frm_name, path, reg_ext, NullS);
|
||||
strxnmov(frm_name, sizeof(frm_name)-1, path, reg_ext, NullS);
|
||||
mysql_file_delete(key_file_frm, frm_name, MYF(0));
|
||||
}
|
||||
|
||||
|
|
73
sql/field.cc
|
@ -1924,17 +1924,11 @@ Field::Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
|
|||
}
|
||||
|
||||
|
||||
void Field::hash(ulong *nr, ulong *nr2)
|
||||
void Field::hash_not_null(Hasher *hasher)
|
||||
{
|
||||
if (is_null())
|
||||
{
|
||||
*nr^= (*nr << 1) | 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
uint len= pack_length();
|
||||
sort_charset()->hash_sort(ptr, len, nr, nr2);
|
||||
}
|
||||
DBUG_ASSERT(marked_for_read());
|
||||
DBUG_ASSERT(!is_null());
|
||||
hasher->add(sort_charset(), ptr, pack_length());
|
||||
}
|
||||
|
||||
size_t
|
||||
|
@ -8347,17 +8341,12 @@ bool Field_varstring::is_equal(const Column_definition &new_field) const
|
|||
}
|
||||
|
||||
|
||||
void Field_varstring::hash(ulong *nr, ulong *nr2)
|
||||
void Field_varstring::hash_not_null(Hasher *hasher)
|
||||
{
|
||||
if (is_null())
|
||||
{
|
||||
*nr^= (*nr << 1) | 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
uint len= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
|
||||
charset()->hash_sort(ptr + length_bytes, len, nr, nr2);
|
||||
}
|
||||
DBUG_ASSERT(marked_for_read());
|
||||
DBUG_ASSERT(!is_null());
|
||||
uint len= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
|
||||
hasher->add(charset(), ptr + length_bytes, len);
|
||||
}
|
||||
|
||||
|
||||
|
@ -8732,6 +8721,17 @@ oom_error:
|
|||
}
|
||||
|
||||
|
||||
void Field_blob::hash_not_null(Hasher *hasher)
|
||||
{
|
||||
DBUG_ASSERT(marked_for_read());
|
||||
DBUG_ASSERT(!is_null());
|
||||
char *blob;
|
||||
memcpy(&blob, ptr + packlength, sizeof(char*));
|
||||
if (blob)
|
||||
hasher->add(Field_blob::charset(), blob, get_length(ptr));
|
||||
}
|
||||
|
||||
|
||||
double Field_blob::val_real(void)
|
||||
{
|
||||
DBUG_ASSERT(marked_for_read());
|
||||
|
@ -9794,20 +9794,27 @@ const DTCollation & Field_bit::dtcollation() const
|
|||
}
|
||||
|
||||
|
||||
void Field_bit::hash(ulong *nr, ulong *nr2)
|
||||
/*
|
||||
This method always calculates hash over 8 bytes.
|
||||
This is different from how the HEAP engine calculates the hash:
|
||||
HEAP takes into account the actual octet size, so say for BIT(18)
|
||||
it calculates hash over three bytes only:
|
||||
- the incomplete byte with bits 16..17
|
||||
- the two full bytes with bits 0..15
|
||||
See hp_rec_hashnr(), hp_hashnr() for details.
|
||||
|
||||
The HEAP way is more efficient, especially for short lengths.
|
||||
Let's consider fixing Field_bit eventually to do it in the HEAP way,
|
||||
with proper measures to make upgrading partitioned tables easy.
|
||||
*/
|
||||
void Field_bit::hash_not_null(Hasher *hasher)
|
||||
{
|
||||
if (is_null())
|
||||
{
|
||||
*nr^= (*nr << 1) | 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
CHARSET_INFO *cs= &my_charset_bin;
|
||||
longlong value= Field_bit::val_int();
|
||||
uchar tmp[8];
|
||||
mi_int8store(tmp,value);
|
||||
cs->hash_sort(tmp, 8, nr, nr2);
|
||||
}
|
||||
DBUG_ASSERT(marked_for_read());
|
||||
DBUG_ASSERT(!is_null());
|
||||
longlong value= Field_bit::val_int();
|
||||
uchar tmp[8];
|
||||
mi_int8store(tmp,value);
|
||||
hasher->add(&my_charset_bin, tmp, 8);
|
||||
}
|
||||
|
||||
|
||||
|
|
14
sql/field.h
|
@ -1845,7 +1845,14 @@ public:
|
|||
key_map get_possible_keys();
|
||||
|
||||
/* Hash value */
|
||||
virtual void hash(ulong *nr, ulong *nr2);
|
||||
void hash(Hasher *hasher)
|
||||
{
|
||||
if (is_null())
|
||||
hasher->add_null();
|
||||
else
|
||||
hash_not_null(hasher);
|
||||
}
|
||||
virtual void hash_not_null(Hasher *hasher);
|
||||
|
||||
/**
|
||||
Get the upper limit of the MySQL integral and floating-point type.
|
||||
|
@ -4221,7 +4228,7 @@ public:
|
|||
uchar *new_ptr, uint32 length,
|
||||
uchar *new_null_ptr, uint new_null_bit) override;
|
||||
bool is_equal(const Column_definition &new_field) const override;
|
||||
void hash(ulong *nr, ulong *nr2) override;
|
||||
void hash_not_null(Hasher *hasher) override;
|
||||
uint length_size() const override { return length_bytes; }
|
||||
void print_key_value(String *out, uint32 length) override;
|
||||
Binlog_type_info binlog_type_info() const override;
|
||||
|
@ -4481,6 +4488,7 @@ public:
|
|||
bool make_empty_rec_store_default_value(THD *thd, Item *item) override;
|
||||
int store(const char *to, size_t length, CHARSET_INFO *charset) override;
|
||||
using Field_str::store;
|
||||
void hash_not_null(Hasher *hasher) override;
|
||||
double val_real() override;
|
||||
longlong val_int() override;
|
||||
String *val_str(String *, String *) override;
|
||||
|
@ -5051,7 +5059,7 @@ public:
|
|||
if (bit_ptr)
|
||||
bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*);
|
||||
}
|
||||
void hash(ulong *nr, ulong *nr2) override;
|
||||
void hash_not_null(Hasher *hasher) override;
|
||||
|
||||
SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
|
||||
const Item_bool_func *cond,
|
||||
|
|
|
@ -10001,8 +10001,7 @@ uint8 ha_partition::table_cache_type()
|
|||
|
||||
uint32 ha_partition::calculate_key_hash_value(Field **field_array)
|
||||
{
|
||||
ulong nr1= 1;
|
||||
ulong nr2= 4;
|
||||
Hasher hasher;
|
||||
bool use_51_hash;
|
||||
use_51_hash= MY_TEST((*field_array)->table->part_info->key_algorithm ==
|
||||
partition_info::KEY_ALGORITHM_51);
|
||||
|
@ -10029,12 +10028,12 @@ uint32 ha_partition::calculate_key_hash_value(Field **field_array)
|
|||
{
|
||||
if (field->is_null())
|
||||
{
|
||||
nr1^= (nr1 << 1) | 1;
|
||||
hasher.add_null();
|
||||
continue;
|
||||
}
|
||||
/* Force this to my_hash_sort_bin, which was used in 5.1! */
|
||||
uint len= field->pack_length();
|
||||
my_charset_bin.hash_sort(field->ptr, len, &nr1, &nr2);
|
||||
hasher.add(&my_charset_bin, field->ptr, len);
|
||||
/* Done with this field, continue with next one. */
|
||||
continue;
|
||||
}
|
||||
|
@ -10052,12 +10051,12 @@ uint32 ha_partition::calculate_key_hash_value(Field **field_array)
|
|||
{
|
||||
if (field->is_null())
|
||||
{
|
||||
nr1^= (nr1 << 1) | 1;
|
||||
hasher.add_null();
|
||||
continue;
|
||||
}
|
||||
/* Force this to my_hash_sort_bin, which was used in 5.1! */
|
||||
uint len= field->pack_length();
|
||||
my_charset_latin1.hash_sort(field->ptr, len, &nr1, &nr2);
|
||||
hasher.add(&my_charset_latin1, field->ptr, len);
|
||||
continue;
|
||||
}
|
||||
/* New types in mysql-5.6. */
|
||||
|
@ -10084,9 +10083,9 @@ uint32 ha_partition::calculate_key_hash_value(Field **field_array)
|
|||
}
|
||||
/* fall through, use collation based hashing. */
|
||||
}
|
||||
field->hash(&nr1, &nr2);
|
||||
field->hash(&hasher);
|
||||
} while (*(++field_array));
|
||||
return (uint32) nr1;
|
||||
return (uint32) hasher.finalize();
|
||||
}
|
||||
|
||||
|
||||
|
|
|
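The field.cc, field.h and ha_partition.cc hunks above replace the old pattern of threading a (nr1, nr2) pair of ulongs through every hash call with a single Hasher object exposing add(), add_null() and finalize(). Below is a minimal sketch of what such a wrapper looks like, reconstructed only from the calls visible in these hunks and from the code they remove (nr1=1, nr2=4, hash_sort(), and the nr1^=(nr1<<1)|1 NULL step); the class name comes from the diff, but the exact member signatures are an assumption rather than a copy of the server header.

// Sketch of the Hasher wrapper assumed by the hunks above (reconstructed,
// not copied from the server headers; the server typedefs ulong/uint32/uchar
// and CHARSET_INFO from m_ctype.h are assumed to be in scope).
class Hasher
{
  ulong m_nr1= 1, m_nr2= 4;                     // the seeds the old code started from
public:
  void add_null()
  {
    m_nr1^= (m_nr1 << 1) | 1;                   // the old NULL mixing step, kept verbatim
  }
  void add(CHARSET_INFO *cs, const uchar *str, size_t length)
  {
    cs->hash_sort(str, length, &m_nr1, &m_nr2); // charset-aware, as hash_sort() was before
  }
  uint32 finalize() const
  {
    return (uint32) m_nr1;                      // mirrors "return (uint32) hasher.finalize()"
  }
};

With this shape, Field::hash() in the field.h hunk reduces to a NULL check followed by hash_not_null(&hasher), and ha_partition::calculate_key_hash_value() no longer has to carry two ulongs through every branch; it simply calls hasher.finalize() at the end.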
@ -4699,6 +4699,35 @@ int handler::check_collation_compatibility()
|
|||
}
|
||||
|
||||
|
||||
int handler::check_long_hash_compatibility() const
|
||||
{
|
||||
if (!table->s->old_long_hash_function())
|
||||
return 0;
|
||||
KEY *key= table->key_info;
|
||||
KEY *key_end= key + table->s->keys;
|
||||
for ( ; key < key_end; key++)
|
||||
{
|
||||
if (key->algorithm == HA_KEY_ALG_LONG_HASH)
|
||||
{
|
||||
/*
|
||||
The old (pre-MDEV-27653) hash function was wrong.
|
||||
So the long hash unique constraint can have some
|
||||
duplicate records. REPAIR TABLE can't fix this,
|
||||
it will fail on a duplicate key error.
|
||||
Only "ALTER IGNORE TABLE .. FORCE" can fix this.
|
||||
So we need to return HA_ADMIN_NEEDS_ALTER here,
|
||||
(not HA_ADMIN_NEEDS_UPGRADE which is used elsewhere),
|
||||
to properly send the error message text corresponding
|
||||
to ER_TABLE_NEEDS_REBUILD (rather than to ER_TABLE_NEEDS_UPGRADE)
|
||||
to the user.
|
||||
*/
|
||||
return HA_ADMIN_NEEDS_ALTER;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
|
||||
{
|
||||
int error;
|
||||
|
@ -4736,6 +4765,9 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
|
|||
|
||||
if (unlikely((error= check_collation_compatibility())))
|
||||
return error;
|
||||
|
||||
if (unlikely((error= check_long_hash_compatibility())))
|
||||
return error;
|
||||
|
||||
return check_for_upgrade(check_opt);
|
||||
}
|
||||
|
|
|
@ -3522,6 +3522,7 @@ public:
|
|||
}
|
||||
|
||||
int check_collation_compatibility();
|
||||
int check_long_hash_compatibility() const;
|
||||
int ha_check_for_upgrade(HA_CHECK_OPT *check_opt);
|
||||
/** to be actually called to get 'check()' functionality*/
|
||||
int ha_check(THD *thd, HA_CHECK_OPT *check_opt);
|
||||
|
|
|
@ -10815,7 +10815,7 @@ table_map Item_direct_view_ref::used_tables() const
|
|||
table_map used= (*ref)->used_tables();
|
||||
return (used ?
|
||||
used :
|
||||
((null_ref_table != NO_NULL_TABLE) ?
|
||||
(null_ref_table != NO_NULL_TABLE && !null_ref_table->const_table ?
|
||||
null_ref_table->map :
|
||||
(table_map)0 ));
|
||||
}
|
||||
|
|
13
sql/item.h
|
@ -1499,6 +1499,12 @@ public:
|
|||
*/
|
||||
inline ulonglong val_uint() { return (ulonglong) val_int(); }
|
||||
|
||||
virtual bool hash_not_null(Hasher *hasher)
|
||||
{
|
||||
DBUG_ASSERT(0);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
Return string representation of this item object.
|
||||
|
||||
|
@ -3691,6 +3697,13 @@ public:
|
|||
{
|
||||
return Sql_mode_dependency(0, field->value_depends_on_sql_mode());
|
||||
}
|
||||
bool hash_not_null(Hasher *hasher) override
|
||||
{
|
||||
if (field->is_null())
|
||||
return true;
|
||||
field->hash_not_null(hasher);
|
||||
return false;
|
||||
}
|
||||
longlong val_int_endpoint(bool left_endp, bool *incl_endp) override;
|
||||
bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override;
|
||||
bool get_date_result(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate)
|
||||
|
|
|
@@ -4909,38 +4909,18 @@ Item_cond::fix_fields(THD *thd, Item **ref)

  if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
    return TRUE;                       // Fatal error flag is set!
  /*
    The following optimization reduces the depth of an AND-OR tree.
    E.g. a WHERE clause like
    F1 AND (F2 AND (F2 AND F4))
    is parsed into a tree with the same nested structure as defined
    by braces. This optimization will transform such tree into
    AND (F1, F2, F3, F4).
    Trees of OR items are flattened as well:
    ((F1 OR F2) OR (F3 OR F4)) => OR (F1, F2, F3, F4)
    Items for removed AND/OR levels will dangle until the death of the
    entire statement.
    The optimization is currently prepared statements and stored procedures
    friendly as it doesn't allocate any memory and its effects are durable
    (i.e. do not depend on PS/SP arguments).
  */
  while ((item=li++))
  while (li++)
  {
    while (item->type() == Item::COND_ITEM &&
           ((Item_cond*) item)->functype() == functype() &&
           !((Item_cond*) item)->list.is_empty())
    {                                  // Identical function
      li.replace(((Item_cond*) item)->list);
      ((Item_cond*) item)->list.empty();
      item= *li.ref();                 // new current item
    }
    merge_sub_condition(li);
    item= *li.ref();
    if (is_top_level_item())
      item->top_level_item();

    /*
      replace degraded condition:
      was:    <field>
      become: <field> = 1
      become: <field> != 0
    */
    Item::Type type= item->type();
    if (type == Item::FIELD_ITEM || type == Item::REF_ITEM)

@@ -4956,7 +4936,9 @@ Item_cond::fix_fields(THD *thd, Item **ref)

    if (item->fix_fields_if_needed_for_bool(thd, li.ref()))
      return TRUE; /* purecov: inspected */
    item= *li.ref(); // item can be substituted in fix_fields
    merge_sub_condition(li);
    item= *li.ref(); // may be substituted in fix_fields/merge_item_if_possible

    used_tables_cache|= item->used_tables();
    if (item->can_eval_in_optimize() && !item->with_sp_var() &&
        !cond_has_datetime_is_null(item))

@@ -5003,6 +4985,55 @@ Item_cond::fix_fields(THD *thd, Item **ref)
  return FALSE;
}

/**
  @brief
  Merge a lower-level condition pointed by the iterator into this Item_cond
  if possible

  @param li         list iterator pointing to condition that must be
                    examined and merged if possible.

  @details
  If an item pointed by the iterator is an instance of Item_cond with the
  same functype() as this Item_cond (i.e. both are Item_cond_and or both are
  Item_cond_or) then the arguments of that lower-level item can be merged
  into the list of arguments of this upper-level Item_cond.

  This optimization reduces the depth of an AND-OR tree.
  E.g. a WHERE clause like
  F1 AND (F2 AND (F2 AND F4))
  is parsed into a tree with the same nested structure as defined
  by braces. This optimization will transform such tree into
  AND (F1, F2, F3, F4).
  Trees of OR items are flattened as well:
  ((F1 OR F2) OR (F3 OR F4)) => OR (F1, F2, F3, F4)
  Items for removed AND/OR levels will dangle until the death of the
  entire statement.

  The optimization is currently prepared statements and stored procedures
  friendly as it doesn't allocate any memory and its effects are durable
  (i.e. do not depend on PS/SP arguments).
*/
void Item_cond::merge_sub_condition(List_iterator<Item>& li)
{
  Item *item= *li.ref();

  /*
    The check for list.is_empty() is to catch empty Item_cond_and() items.
    We may encounter Item_cond_and with an empty list, because optimizer code
    strips multiple equalities, combines items, then adds multiple equalities
    back
  */
  while (item->type() == Item::COND_ITEM &&
         ((Item_cond*) item)->functype() == functype() &&
         !((Item_cond*) item)->list.is_empty())
  {
    li.replace(((Item_cond*) item)->list);
    ((Item_cond*) item)->list.empty();
    item= *li.ref();
  }
}


bool
Item_cond::eval_not_null_tables(void *opt_arg)
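merge_sub_condition() is documented above as flattening nested AND/OR nodes of the same type into their parent. A small stand-alone illustration of the same idea on a toy expression tree (not the server's Item classes):

#include <memory>
#include <string>
#include <vector>

// Toy condition tree: a leaf predicate or an AND/OR node over sub-conditions.
struct Cond
{
  enum Kind { LEAF, AND, OR } kind;
  std::string name;                               // leaves only
  std::vector<std::unique_ptr<Cond>> args;
};

// Flatten children of the same kind into the parent, the idea behind
// Item_cond::merge_sub_condition(): AND(F1, AND(F2, F3)) -> AND(F1, F2, F3).
static void flatten(Cond *node)
{
  if (node->kind == Cond::LEAF)
    return;
  std::vector<std::unique_ptr<Cond>> flat;
  for (auto &child : node->args)
  {
    flatten(child.get());
    if (child->kind == node->kind)                // same "functype": splice args
      for (auto &grand : child->args)
        flat.push_back(std::move(grand));
    else
      flat.push_back(std::move(child));
  }
  node->args= std::move(flat);
}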
@@ -3212,6 +3212,9 @@ public:
  Item *build_clone(THD *thd) override;
  bool excl_dep_on_table(table_map tab_map) override;
  bool excl_dep_on_grouping_fields(st_select_lex *sel) override;

private:
  void merge_sub_condition(List_iterator<Item>& li);
};

template <template<class> class LI, class T> class Item_equal_iterator;
@@ -1767,7 +1767,7 @@ static void calc_hash_for_unique(ulong &nr1, ulong &nr2, String *str)
  cs->hash_sort((uchar *)str->ptr(), str->length(), &nr1, &nr2);
}

longlong Item_func_hash::val_int()
longlong Item_func_hash_mariadb_100403::val_int()
{
  DBUG_EXECUTE_IF("same_long_unique_hash", return 9;);
  unsigned_flag= true;

@@ -1788,6 +1788,24 @@ longlong Item_func_hash::val_int()
}


longlong Item_func_hash::val_int()
{
  DBUG_EXECUTE_IF("same_long_unique_hash", return 9;);
  unsigned_flag= true;
  Hasher hasher;
  for(uint i= 0;i<arg_count;i++)
  {
    if (args[i]->hash_not_null(&hasher))
    {
      null_value= 1;
      return 0;
    }
  }
  null_value= 0;
  return (longlong) hasher.finalize();
}


bool Item_func_hash::fix_length_and_dec(THD *thd)
{
  decimals= 0;
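The new Item_func_hash::val_int() asks every argument to hash itself via hash_not_null() and gives up as soon as one argument is NULL. A compact model of that contract, with a toy hasher and std::optional standing in for Item and null_value:

#include <cstdint>
#include <optional>
#include <string>
#include <vector>

// Toy stand-ins: ToyHasher replaces Hasher, std::optional<std::string>
// replaces an Item that may evaluate to SQL NULL.
struct ToyHasher
{
  uint64_t state= 1;
  void add(const std::string &s)
  {
    for (unsigned char c : s)
      state= state * 31 + c;                      // any mixing will do here
  }
  uint64_t finalize() const { return state; }
};

// Mirrors Item_func_hash::val_int(): bail out (NULL result) as soon as any
// argument reports NULL, otherwise hash all arguments and finalize once.
std::optional<uint64_t>
hash_row(const std::vector<std::optional<std::string>> &args)
{
  ToyHasher hasher;
  for (const auto &arg : args)
  {
    if (!arg)                                     // hash_not_null() returned true
      return std::nullopt;
    hasher.add(*arg);
  }
  return hasher.finalize();
}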
@@ -1209,6 +1209,18 @@ public:
  }
};

class Item_func_hash_mariadb_100403: public Item_func_hash
{
public:
  Item_func_hash_mariadb_100403(THD *thd, List<Item> &item)
   :Item_func_hash(thd, item)
  {}
  longlong val_int();
  Item *get_copy(THD *thd)
  { return get_item_copy<Item_func_hash_mariadb_100403>(thd, this); }
  const char *func_name() const { return "<hash_mariadb_100403>"; }
};

class Item_longlong_func: public Item_int_func
{
public:
@@ -1848,6 +1848,18 @@ bool Item_func_ucase::fix_length_and_dec(THD *thd)
}


bool Item_func_left::hash_not_null(Hasher *hasher)
{
  StringBuffer<STRING_BUFFER_USUAL_SIZE> buf;
  String *str= val_str(&buf);
  DBUG_ASSERT((str == NULL) == null_value);
  if (!str)
    return true;
  hasher->add(collation.collation, str->ptr(), str->length());
  return false;
}


String *Item_func_left::val_str(String *str)
{
  DBUG_ASSERT(fixed());
@@ -582,6 +582,7 @@ class Item_func_left :public Item_str_func
  String tmp_value;
public:
  Item_func_left(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {}
  bool hash_not_null(Hasher *hasher) override;
  String *val_str(String *) override;
  bool fix_length_and_dec(THD *thd) override;
  LEX_CSTRING func_name_cstring() const override
@@ -367,7 +367,14 @@ public:
  int8 aggr_level;        /* nesting level of the aggregating subquery */
  int8 max_arg_level;     /* max level of unbound column references */
  int8 max_sum_func_level;/* max level of aggregation for embedded functions */
  bool quick_group;       /* If incremental update of fields */

  /*
    true (the default value) means this aggregate function can be computed
    with TemporaryTableWithPartialSums algorithm (see end_update()).
    false means this aggregate function needs OrderedGroupBy algorithm (see
    end_write_group()).
  */
  bool quick_group;
  /*
    This list is used by the check for mixing non aggregated fields and
    sum functions in the ONLY_FULL_GROUP_BY_MODE. We save all outer fields
@@ -1801,6 +1801,7 @@ int binlog_init(void *p)
    binlog_hton->prepare= binlog_prepare;
    binlog_hton->start_consistent_snapshot= binlog_start_consistent_snapshot;
  }

  binlog_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN | HTON_NO_ROLLBACK;
  return 0;
}

@@ -2119,7 +2120,9 @@ int binlog_commit_by_xid(handlerton *hton, XID *xid)
  THD *thd= current_thd;

  if (thd->is_current_stmt_binlog_disabled())
    return 0;
  {
    return thd->wait_for_prior_commit();
  }

  /* the asserted state can't be reachable with xa commit */
  DBUG_ASSERT(!thd->get_stmt_da()->is_error() ||

@@ -2151,7 +2154,9 @@ int binlog_rollback_by_xid(handlerton *hton, XID *xid)
  THD *thd= current_thd;

  if (thd->is_current_stmt_binlog_disabled())
    return 0;
  {
    return thd->wait_for_prior_commit();
  }

  if (thd->get_stmt_da()->is_error() &&
      thd->get_stmt_da()->sql_errno() == ER_XA_RBROLLBACK)
@@ -313,7 +313,7 @@ static char *get_plugindir()
{
  static char plugin_dir[2*MAX_PATH];
  get_basedir(plugin_dir, sizeof(plugin_dir), mysqld_path, '/');
  strcat(plugin_dir, "/" STR(INSTALL_PLUGINDIR));
  safe_strcat(plugin_dir, sizeof(plugin_dir), "/" STR(INSTALL_PLUGINDIR));

  if (access(plugin_dir, 0) == 0)
    return plugin_dir;
@@ -5057,12 +5057,11 @@ static int init_server_components()
  else // full wsrep initialization
  {
    // add basedir/bin to PATH to resolve wsrep script names
    char* const tmp_path= (char*)my_alloca(strlen(mysql_home) +
                                           strlen("/bin") + 1);
    size_t tmp_path_size= strlen(mysql_home) + 5; /* including "/bin" */
    char* const tmp_path= (char*)my_alloca(tmp_path_size);
    if (tmp_path)
    {
      strcpy(tmp_path, mysql_home);
      strcat(tmp_path, "/bin");
      snprintf(tmp_path, tmp_path_size, "%s/bin", mysql_home);
      wsrep_prepend_PATH(tmp_path);
    }
    else

@@ -5893,8 +5892,9 @@ int mysqld_main(int argc, char **argv)
    char real_server_version[2 * SERVER_VERSION_LENGTH + 10];

    set_server_version(real_server_version, sizeof(real_server_version));
    strcat(real_server_version, "' as '");
    strcat(real_server_version, server_version);
    safe_strcat(real_server_version, sizeof(real_server_version), "' as '");
    safe_strcat(real_server_version, sizeof(real_server_version),
                server_version);

    sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname,
                          real_server_version,

@@ -7880,7 +7880,8 @@ static int mysql_init_variables(void)
  }
  else
    my_path(prg_dev, my_progname, "mysql/bin");
  strcat(prg_dev,"/../");      // Remove 'bin' to get base dir
  // Remove 'bin' to get base dir
  safe_strcat(prg_dev, sizeof(prg_dev), "/../");
  cleanup_dirname(mysql_home,prg_dev);
}
#else
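Several of the mysqld.cc and mysql_install_db.cc hunks above replace unbounded strcpy()/strcat() calls with safe_strcpy()/safe_strcat()/snprintf(). A sketch of a bounded, always-NUL-terminated append in the same spirit; the real helpers live in the MariaDB includes, so this version is only illustrative:

#include <cstddef>
#include <cstdio>
#include <cstring>

// Illustrative bounded append: never writes past dst_size bytes and always
// leaves dst NUL-terminated, truncating if needed.
static void sketch_strcat(char *dst, size_t dst_size, const char *src)
{
  if (dst_size == 0)
    return;
  size_t used= strlen(dst);
  if (used >= dst_size - 1)
    return;                                       // no room left
  snprintf(dst + used, dst_size - used, "%s", src);
}

int main()
{
  char path[16]= "/usr/local";
  sketch_strcat(path, sizeof(path), "/bin");      // fits exactly
  sketch_strcat(path, sizeof(path), "/mariadbd"); // truncated safely
  std::puts(path);
  return 0;
}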
@@ -56,8 +56,7 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev,
  rgi->event_relay_log_pos= qev->event_relay_log_pos;
  rgi->future_event_relay_log_pos= qev->future_event_relay_log_pos;
  strcpy(rgi->future_event_master_log_name, qev->future_event_master_log_name);
  if (!(ev->is_artificial_event() || ev->is_relay_log_event() ||
        (ev->when == 0)))
  if (event_can_update_last_master_timestamp(ev))
    rgi->last_master_timestamp= ev->when + (time_t)ev->exec_time;
  err= apply_event_and_update_pos_for_parallel(ev, thd, rgi);
@@ -2296,11 +2296,9 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)

  if (unlikely(error))
  {
    /*
      trans_rollback above does not rollback XA transactions
      (todo/fixme consider to do so.
    */
    if (thd->transaction->xid_state.is_explicit_XA())
    // leave alone any XA prepared transactions
    if (thd->transaction->xid_state.is_explicit_XA() &&
        thd->transaction->xid_state.get_state_code() != XA_PREPARED)
      xa_trans_force_rollback(thd);

    thd->release_transactional_locks();
sql/slave.cc (31 changed lines)
@@ -4286,10 +4286,10 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
    the user might be surprised to see a claim that the slave is up to date
    long before those queued events are actually executed.
  */
  if (!rli->mi->using_parallel() &&
      !(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0)))
  if ((!rli->mi->using_parallel()) && event_can_update_last_master_timestamp(ev))
  {
    rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;
    rli->sql_thread_caught_up= false;
    DBUG_ASSERT(rli->last_master_timestamp >= 0);
  }

@@ -4341,6 +4341,17 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,

  if (rli->mi->using_parallel())
  {
    if (unlikely((rli->last_master_timestamp == 0 ||
                  rli->sql_thread_caught_up) &&
                 event_can_update_last_master_timestamp(ev)))
    {
      if (rli->last_master_timestamp < ev->when)
      {
        rli->last_master_timestamp= ev->when;
        rli->sql_thread_caught_up= false;
      }
    }

    int res= rli->parallel.do_event(serial_rgi, ev, event_size);
    /*
      In parallel replication, we need to update the relay log position

@@ -4361,7 +4372,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
      This is the case for pre-10.0 events without GTID, and for handling
      slave_skip_counter.
    */
    if (!(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0)))
    if (event_can_update_last_master_timestamp(ev))
    {
      /*
        Ignore FD's timestamp as it does not reflect the slave execution

@@ -4369,7 +4380,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
        data modification event execution last long all this time
        Seconds_Behind_Master is zero.
      */
      if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT)
      if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT &&
          rli->last_master_timestamp < ev->when)
        rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;

      DBUG_ASSERT(rli->last_master_timestamp >= 0);

@@ -7761,7 +7773,6 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)

      if (hot_log)
        mysql_mutex_unlock(log_lock);
      rli->sql_thread_caught_up= false;
      DBUG_RETURN(ev);
    }
    if (opt_reckless_slave) // For mysql-test

@@ -7925,7 +7936,6 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
        rli->relay_log.wait_for_update_relay_log(rli->sql_driver_thd);
        // re-acquire data lock since we released it earlier
        mysql_mutex_lock(&rli->data_lock);
        rli->sql_thread_caught_up= false;
        continue;
      }
      /*

@@ -8116,12 +8126,19 @@ event(errno: %d cur_log->error: %d)",
  {
    sql_print_information("Error reading relay log event: %s",
                          "slave SQL thread was killed");
    DBUG_RETURN(0);
    goto end;
  }

err:
  if (errmsg)
    sql_print_error("Error reading relay log event: %s", errmsg);

end:
  /*
    Set that we are not caught up so if there is a hang/problem on restart,
    Seconds_Behind_Master will still grow.
  */
  rli->sql_thread_caught_up= false;
  DBUG_RETURN(0);
}
#ifdef WITH_WSREP
sql/slave.h (12 changed lines)
@@ -49,6 +49,7 @@
#include "rpl_filter.h"
#include "rpl_tblmap.h"
#include "rpl_gtid.h"
#include "log_event.h"

#define SLAVE_NET_TIMEOUT  60

@@ -293,6 +294,17 @@ extern char *report_host, *report_password;

extern I_List<THD> threads;

/*
  Check that a binlog event (read from the relay log) is valid to update
  last_master_timestamp. That is, a valid event is one with a consistent
  timestamp which originated from a primary server.
*/
static inline bool event_can_update_last_master_timestamp(Log_event *ev)
{
  return ev && !(ev->is_artificial_event() || ev->is_relay_log_event() ||
                 (ev->when == 0));
}

#else
#define close_active_mi() /* no-op */
#endif /* HAVE_REPLICATION */
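event_can_update_last_master_timestamp() and the slave.cc hunks above gate updates of last_master_timestamp on events that carry a usable primary-side timestamp, and only ever move the value forward. A toy version of that gating logic (simplified event fields, not the Log_event class):

#include <algorithm>
#include <cstdint>

// Simplified event record carrying just the fields the predicate looks at.
struct ToyEvent
{
  uint32_t when;               // 0 when the event has no usable timestamp
  bool artificial;
  bool relay_log_event;
};

static bool can_update_last_master_timestamp(const ToyEvent *ev)
{
  return ev && !(ev->artificial || ev->relay_log_event || ev->when == 0);
}

// Mirrors the guard added in slave.cc: the timestamp only moves forward,
// which matters when parallel workers see events out of order.
static void update_timestamp(uint32_t &last_master_timestamp, const ToyEvent &ev)
{
  if (can_update_last_master_timestamp(&ev))
    last_master_timestamp= std::max(last_master_timestamp, ev.when);
}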
@@ -44,7 +44,8 @@ const LEX_CSTRING msg_optimize= { STRING_WITH_LEN("optimize") };

/* Prepare, run and cleanup for mysql_recreate_table() */

static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list)
static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list,
                                 Recreate_info *recreate_info)
{
  bool result_code;
  DBUG_ENTER("admin_recreate_table");

@@ -65,7 +66,7 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list)
  DEBUG_SYNC(thd, "ha_admin_try_alter");
  tmp_disable_binlog(thd); // binlogging is done by caller if wanted
  result_code= (thd->open_temporary_tables(table_list) ||
                mysql_recreate_table(thd, table_list, false));
                mysql_recreate_table(thd, table_list, recreate_info, false));
  reenable_binlog(thd);
  /*
    mysql_recreate_table() can push OK or ERROR.

@@ -560,6 +561,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
    bool open_error= 0;
    bool collect_eis=  FALSE;
    bool open_for_modify= org_open_for_modify;
    Recreate_info recreate_info;

    storage_engine_name[0]= 0;                    // Marker that's not used

@@ -829,7 +831,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
    {
      /* We use extra_open_options to be able to open crashed tables */
      thd->open_options|= extra_open_options;
      result_code= admin_recreate_table(thd, table);
      result_code= admin_recreate_table(thd, table, &recreate_info) ?
                   HA_ADMIN_FAILED : HA_ADMIN_OK;
      thd->open_options&= ~extra_open_options;
      goto send_result;
    }

@@ -1012,12 +1015,31 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
        repair was not implemented and we need to upgrade the table
        to a new version so we recreate the table with ALTER TABLE
      */
      result_code= admin_recreate_table(thd, table);
      result_code= admin_recreate_table(thd, table, &recreate_info);
    }
    send_result:

    lex->cleanup_after_one_table_open();
    thd->clear_error();  // these errors shouldn't get client

    if (recreate_info.records_duplicate())
    {
      protocol->prepare_for_resend();
      protocol->store(&table_name, system_charset_info);
      protocol->store(operator_name, system_charset_info);
      protocol->store(warning_level_names[Sql_condition::WARN_LEVEL_WARN].str,
                      warning_level_names[Sql_condition::WARN_LEVEL_WARN].length,
                      system_charset_info);
      char buf[80];
      size_t length= my_snprintf(buf, sizeof(buf),
                                 "Number of rows changed from %u to %u",
                                 (uint) recreate_info.records_processed(),
                                 (uint) recreate_info.records_copied());
      protocol->store(buf, length, system_charset_info);
      if (protocol->write())
        goto err;
    }

    {
      Diagnostics_area::Sql_condition_iterator it=
        thd->get_stmt_da()->sql_conditions();

@@ -1128,7 +1150,7 @@ send_result_message:
        *save_next_global= table->next_global;
        table->next_local= table->next_global= 0;

        result_code= admin_recreate_table(thd, table);
        result_code= admin_recreate_table(thd, table, &recreate_info);
        trans_commit_stmt(thd);
        trans_commit(thd);
        close_thread_tables(thd);

@@ -1340,6 +1362,8 @@ send_result_message:
      goto err;
    DEBUG_SYNC(thd, "admin_command_kill_after_modify");
  }
  thd->resume_subsequent_commits(suspended_wfc);
  DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););
  if (is_table_modified && is_cmd_replicated &&
      (!opt_readonly || thd->slave_thread) && !thd->lex->no_write_to_binlog)
  {

@@ -1349,10 +1373,8 @@ send_result_message:
    if (res)
      goto err;
  }

  my_eof(thd);
  thd->resume_subsequent_commits(suspended_wfc);
  DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););

  DBUG_RETURN(FALSE);

err:

@@ -1501,6 +1523,7 @@ bool Sql_cmd_optimize_table::execute(THD *thd)
  LEX *m_lex= thd->lex;
  TABLE_LIST *first_table= m_lex->first_select_lex()->table_list.first;
  bool res= TRUE;
  Recreate_info recreate_info;
  DBUG_ENTER("Sql_cmd_optimize_table::execute");

  if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table,

@@ -1509,7 +1532,7 @@ bool Sql_cmd_optimize_table::execute(THD *thd)

  WSREP_TO_ISOLATION_BEGIN_WRTCHK(NULL, NULL, first_table);
  res= (specialflag & SPECIAL_NO_NEW_FUNC) ?
    mysql_recreate_table(thd, first_table, true) :
    mysql_recreate_table(thd, first_table, &recreate_info, true) :
    mysql_admin_table(thd, first_table, &m_lex->check_opt,
                      &msg_optimize, TL_WRITE, 1, 0, 0, 0,
                      &handler::ha_optimize, 0, true);
@@ -554,9 +554,11 @@ bool Sql_cmd_alter_table::execute(THD *thd)
  thd->work_part_info= 0;
#endif

  Recreate_info recreate_info;
  result= mysql_alter_table(thd, &select_lex->db, &lex->name,
                            &create_info,
                            first_table,
                            &recreate_info,
                            &alter_info,
                            select_lex->order_list.elements,
                            select_lex->order_list.first,
@@ -8340,6 +8340,20 @@ bool THD::timestamp_to_TIME(MYSQL_TIME *ltime, my_time_t ts,
  return 0;
}


void THD::my_ok_with_recreate_info(const Recreate_info &info,
                                   ulong warn_count)
{
  char buf[80];
  my_snprintf(buf, sizeof(buf),
              ER_THD(this, ER_INSERT_INFO),
              (ulong) info.records_processed(),
              (ulong) info.records_duplicate(),
              warn_count);
  my_ok(this, info.records_processed(), 0L, buf);
}


THD_list_iterator *THD_list_iterator::iterator()
{
  return &server_threads;
@@ -248,6 +248,29 @@ public:
};


class Recreate_info
{
  ha_rows m_records_copied;
  ha_rows m_records_duplicate;
public:
  Recreate_info()
   :m_records_copied(0),
    m_records_duplicate(0)
  { }
  Recreate_info(ha_rows records_copied,
                ha_rows records_duplicate)
   :m_records_copied(records_copied),
    m_records_duplicate(records_duplicate)
  { }
  ha_rows records_copied() const { return m_records_copied; }
  ha_rows records_duplicate() const { return m_records_duplicate; }
  ha_rows records_processed() const
  {
    return m_records_copied + m_records_duplicate;
  }
};


#define TC_HEURISTIC_RECOVER_COMMIT   1
#define TC_HEURISTIC_RECOVER_ROLLBACK 2
extern ulong tc_heuristic_recover;

@@ -4362,6 +4385,8 @@ public:
  inline bool vio_ok() const { return TRUE; }
  inline bool is_connected() { return TRUE; }
#endif

  void my_ok_with_recreate_info(const Recreate_info &info, ulong warn_count);
  /**
    Mark the current error as fatal. Warning: this does not
    set any error, it sets a property of the error, so must be

@@ -6255,6 +6280,12 @@ public:
  uint  sum_func_count;
  uint  hidden_field_count;
  uint  group_parts,group_length,group_null_parts;

  /*
    If we're doing a GROUP BY operation, shows which one is used:
    true   TemporaryTableWithPartialSums algorithm (see end_update()).
    false  OrderedGroupBy algorithm (see end_write_group()).
  */
  uint  quick_group;
  /**
    Enabled when we have atleast one outer_sum_func. Needed when used
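The Recreate_info plumbing above carries copied/duplicate row counts from ALTER/OPTIMIZE back to the client, where sql_admin.cc turns them into a "Number of rows changed from X to Y" note. A tiny stand-alone model of those counters and of the message they produce:

#include <cstdio>

// Stand-alone model of the counters Recreate_info carries: "processed" rows
// are everything read from the old table, "copied" rows survived into the
// rebuilt table, duplicates were dropped.
struct RecreateCounters
{
  unsigned long long copied= 0;
  unsigned long long duplicate= 0;
  unsigned long long processed() const { return copied + duplicate; }
};

int main()
{
  // e.g. ALTER IGNORE TABLE .. FORCE on a long-unique table with 2 dup rows
  RecreateCounters c{98, 2};
  if (c.duplicate)             // mirrors the records_duplicate() check above
    std::printf("Number of rows changed from %llu to %llu\n",
                c.processed(), c.copied);
  return 0;
}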
@@ -100,49 +100,6 @@ bool LEX::check_dependencies_in_with_clauses()
}


/**
  @brief
    Resolve references to CTE in specification of hanging CTE

  @details
    A CTE to which there are no references in the query is called hanging CTE.
    Although such CTE is not used for execution its specification must be
    subject to context analysis. All errors concerning references to
    non-existing tables or fields occurred in the specification must be
    reported as well as all other errors caught at the prepare stage.
    The specification of a hanging CTE might contain references to other
    CTE outside of the specification and within it if the specification
    contains a with clause. This function resolves all such references for
    all hanging CTEs encountered in the processed query.

  @retval
    false   on success
    true    on failure
*/

bool
LEX::resolve_references_to_cte_in_hanging_cte()
{
  for (With_clause *with_clause= with_clauses_list;
       with_clause; with_clause= with_clause->next_with_clause)
  {
    for (With_element *with_elem= with_clause->with_list.first;
         with_elem; with_elem= with_elem->next)
    {
      if (!with_elem->is_referenced())
      {
        TABLE_LIST *first_tbl=
          with_elem->spec->first_select()->table_list.first;
        TABLE_LIST **with_elem_end_pos= with_elem->head->tables_pos.end_pos;
        if (first_tbl && resolve_references_to_cte(first_tbl, with_elem_end_pos))
          return true;
      }
    }
  }
  return false;
}


/**
  @brief
    Resolve table references to CTE from a sub-chain of table references

@@ -289,8 +246,6 @@ LEX::check_cte_dependencies_and_resolve_references()
    return false;
  if (resolve_references_to_cte(query_tables, query_tables_last))
    return true;
  if (resolve_references_to_cte_in_hanging_cte())
    return true;
  return false;
}

@@ -489,47 +444,33 @@ With_element *find_table_def_in_with_clauses(TABLE_LIST *tbl,
                                             st_unit_ctxt_elem *ctxt)
{
  With_element *found= 0;
  st_select_lex_unit *top_unit= 0;
  for (st_unit_ctxt_elem *unit_ctxt_elem= ctxt;
       unit_ctxt_elem;
       unit_ctxt_elem= unit_ctxt_elem->prev)
  {
    st_select_lex_unit *unit= unit_ctxt_elem->unit;
    With_clause *with_clause= unit->with_clause;
    /*
      First look for the table definition in the with clause attached to 'unit'
      if there is any such clause.
    */
    if (with_clause)
    {
      found= with_clause->find_table_def(tbl, NULL);
      /*
        If the reference to tbl that has to be resolved belongs to
        the FROM clause of a descendant of top_unit->with_element
        and this with element belongs to with_clause then this
        element must be used as the barrier for the search in the
        the list of CTEs from with_clause unless the clause contains
        RECURSIVE.
      */
      With_element *barrier= 0;
      if (top_unit && !with_clause->with_recursive &&
          top_unit->with_element &&
          top_unit->with_element->get_owner() == with_clause)
        barrier= top_unit->with_element;
      found= with_clause->find_table_def(tbl, barrier);
      if (found)
        break;
    }
    /*
      If 'unit' is the unit that defines a with element then reset 'unit'
      to the unit whose attached with clause contains this with element.
    */
    With_element *with_elem= unit->with_element;
    if (with_elem)
    {
      if (!(unit_ctxt_elem= unit_ctxt_elem->prev))
        break;
      unit= unit_ctxt_elem->unit;
    }
    with_clause= unit->with_clause;
    /*
      Now look for the table definition in this with clause. If the with clause
      contains RECURSIVE the search is performed through all CTE definitions in
      clause, otherwise up to the definition of 'with_elem' unless it is NULL.
    */
    if (with_clause)
    {
      found= with_clause->find_table_def(tbl,
                                         with_clause->with_recursive ?
                                         NULL : with_elem);
      if (found)
        break;
    }
    top_unit= unit;
  }
  return found;
}
@@ -326,8 +326,6 @@ public:
  friend
  bool LEX::resolve_references_to_cte(TABLE_LIST *tables,
                                      TABLE_LIST **tables_last);
  friend
  bool LEX::resolve_references_to_cte_in_hanging_cte();
};

const uint max_number_of_elements_in_with_clause= sizeof(table_map)*8;

@@ -441,9 +439,6 @@ public:

  friend
  bool LEX::check_dependencies_in_with_clauses();

  friend
  bool LEX::resolve_references_to_cte_in_hanging_cte();
};

inline
|
@ -4028,7 +4028,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
|
|||
lex->current_select->join->select_options|= OPTION_BUFFER_RESULT;
|
||||
}
|
||||
else if (!(lex->current_select->options & OPTION_BUFFER_RESULT) &&
|
||||
thd->locked_tables_mode <= LTM_LOCK_TABLES)
|
||||
thd->locked_tables_mode <= LTM_LOCK_TABLES &&
|
||||
!table->s->long_unique_table)
|
||||
{
|
||||
/*
|
||||
We must not yet prepare the result table if it is the same as one of the
|
||||
|
|
|
@ -1291,8 +1291,6 @@ void LEX::start(THD *thd_arg)
|
|||
stmt_var_list.empty();
|
||||
proc_list.elements=0;
|
||||
|
||||
save_group_list.empty();
|
||||
save_order_list.empty();
|
||||
win_ref= NULL;
|
||||
win_frame= NULL;
|
||||
frame_top_bound= NULL;
|
||||
|
|
|
@ -1204,12 +1204,14 @@ public:
|
|||
group_list_ptrs, and re-establish the original list before each execution.
|
||||
*/
|
||||
SQL_I_List<ORDER> group_list;
|
||||
SQL_I_List<ORDER> save_group_list;
|
||||
Group_list_ptrs *group_list_ptrs;
|
||||
|
||||
List<Item> item_list; /* list of fields & expressions */
|
||||
List<Item> pre_fix; /* above list before fix_fields */
|
||||
List<Item> fix_after_optimize;
|
||||
SQL_I_List<ORDER> order_list; /* ORDER clause */
|
||||
SQL_I_List<ORDER> save_order_list;
|
||||
SQL_I_List<ORDER> gorder_list;
|
||||
Lex_select_limit limit_params; /* LIMIT clause parameters */
|
||||
|
||||
|
@ -3559,8 +3561,6 @@ public:
|
|||
}
|
||||
|
||||
|
||||
SQL_I_List<ORDER> save_group_list;
|
||||
SQL_I_List<ORDER> save_order_list;
|
||||
LEX_CSTRING *win_ref;
|
||||
Window_frame *win_frame;
|
||||
Window_frame_bound *frame_top_bound;
|
||||
|
@ -4823,8 +4823,8 @@ public:
|
|||
const LEX_CSTRING *constraint_name,
|
||||
Table_ident *ref_table_name,
|
||||
DDL_options ddl_options);
|
||||
|
||||
bool check_dependencies_in_with_clauses();
|
||||
bool resolve_references_to_cte_in_hanging_cte();
|
||||
bool check_cte_dependencies_and_resolve_references();
|
||||
bool resolve_references_to_cte(TABLE_LIST *tables,
|
||||
TABLE_LIST **tables_last);
|
||||
|
|
|
@ -54,7 +54,7 @@ public:
|
|||
{
|
||||
elements= tmp.elements;
|
||||
first= tmp.first;
|
||||
next= tmp.next;
|
||||
next= elements ? tmp.next : &first;;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
|
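The one-line sql_list.h change above fixes assignment from an empty SQL_I_List: 'next' must then point at this list's own 'first', not at the source list's. A simplified intrusive list demonstrating why (ToyList is illustrative, not the server template):

#include <cassert>

// Simplified model of SQL_I_List: 'first' heads the chain and 'next' points
// at the location where the next element's link must be stored.
struct ToyNode { ToyNode *link; };

struct ToyList
{
  unsigned elements= 0;
  ToyNode  *first= nullptr;
  ToyNode **next= &first;      // insertion point; self-referential when empty

  ToyList() = default;

  ToyList &operator=(const ToyList &tmp)
  {
    elements= tmp.elements;
    first= tmp.first;
    // Copying tmp.next from an empty list would leave 'next' pointing into
    // 'tmp'; the fixed assignment re-anchors it at our own 'first'.
    next= elements ? tmp.next : &first;
    return *this;
  }

  void push_back(ToyNode *n)
  {
    n->link= nullptr;
    *next= n;
    next= &n->link;
    elements++;
  }
};

int main()
{
  ToyList a, b;
  a= b;                        // copy of an empty list
  ToyNode n1;
  a.push_back(&n1);            // safe: writes into a.first, not into b
  assert(a.first == &n1 && a.elements == 1);
  return 0;
}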
|
@ -4209,8 +4209,10 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
|
|||
|
||||
WSREP_TO_ISOLATION_BEGIN(first_table->db.str, first_table->table_name.str, NULL);
|
||||
|
||||
Recreate_info recreate_info;
|
||||
res= mysql_alter_table(thd, &first_table->db, &first_table->table_name,
|
||||
&create_info, first_table, &alter_info,
|
||||
&create_info, first_table,
|
||||
&recreate_info, &alter_info,
|
||||
0, (ORDER*) 0, 0, lex->if_exists());
|
||||
break;
|
||||
}
|
||||
|
@ -8832,8 +8834,8 @@ TABLE_LIST *st_select_lex::convert_right_join()
|
|||
void st_select_lex::prepare_add_window_spec(THD *thd)
|
||||
{
|
||||
LEX *lex= thd->lex;
|
||||
lex->save_group_list= group_list;
|
||||
lex->save_order_list= order_list;
|
||||
save_group_list= group_list;
|
||||
save_order_list= order_list;
|
||||
lex->win_ref= NULL;
|
||||
lex->win_frame= NULL;
|
||||
lex->frame_top_bound= NULL;
|
||||
|
@ -8860,8 +8862,8 @@ bool st_select_lex::add_window_def(THD *thd,
|
|||
win_part_list_ptr,
|
||||
win_order_list_ptr,
|
||||
win_frame);
|
||||
group_list= thd->lex->save_group_list;
|
||||
order_list= thd->lex->save_order_list;
|
||||
group_list= save_group_list;
|
||||
order_list= save_order_list;
|
||||
if (parsing_place != SELECT_LIST)
|
||||
{
|
||||
fields_in_window_functions+= win_part_list_ptr->elements +
|
||||
|
@ -8887,8 +8889,8 @@ bool st_select_lex::add_window_spec(THD *thd,
|
|||
win_part_list_ptr,
|
||||
win_order_list_ptr,
|
||||
win_frame);
|
||||
group_list= thd->lex->save_group_list;
|
||||
order_list= thd->lex->save_order_list;
|
||||
group_list= save_group_list;
|
||||
order_list= save_order_list;
|
||||
if (parsing_place != SELECT_LIST)
|
||||
{
|
||||
fields_in_window_functions+= win_part_list_ptr->elements +
|
||||
|
|
|
@ -342,7 +342,7 @@ static bool register_builtin(struct st_maria_plugin *, struct st_plugin_int *,
|
|||
struct st_plugin_int **);
|
||||
static void unlock_variables(THD *thd, struct system_variables *vars);
|
||||
static void cleanup_variables(struct system_variables *vars);
|
||||
static void plugin_vars_free_values(sys_var *vars);
|
||||
static void plugin_vars_free_values(st_mysql_sys_var **vars);
|
||||
static void restore_ptr_backup(uint n, st_ptr_backup *backup);
|
||||
static void intern_plugin_unlock(LEX *lex, plugin_ref plugin);
|
||||
static void reap_plugins(void);
|
||||
|
@ -1293,7 +1293,7 @@ static void plugin_del(struct st_plugin_int *plugin, uint del_mask)
|
|||
if (!(plugin->state & del_mask))
|
||||
DBUG_VOID_RETURN;
|
||||
/* Free allocated strings before deleting the plugin. */
|
||||
plugin_vars_free_values(plugin->system_vars);
|
||||
plugin_vars_free_values(plugin->plugin->system_vars);
|
||||
restore_ptr_backup(plugin->nbackups, plugin->ptr_backup);
|
||||
if (plugin->plugin_dl)
|
||||
{
|
||||
|
@ -2945,6 +2945,7 @@ sys_var *find_sys_var(THD *thd, const char *str, size_t length,
|
|||
/*
|
||||
called by register_var, construct_options and test_plugin_options.
|
||||
Returns the 'bookmark' for the named variable.
|
||||
returns null for non thd-local variables.
|
||||
LOCK_system_variables_hash should be at least read locked
|
||||
*/
|
||||
static st_bookmark *find_bookmark(const char *plugin, const char *name,
|
||||
|
@ -3001,7 +3002,6 @@ static size_t var_storage_size(int flags)
|
|||
|
||||
/*
|
||||
returns a bookmark for thd-local variables, creating if neccessary.
|
||||
returns null for non thd-local variables.
|
||||
Requires that a write lock is obtained on LOCK_system_variables_hash
|
||||
*/
|
||||
static st_bookmark *register_var(const char *plugin, const char *name,
|
||||
|
@ -3355,27 +3355,35 @@ void plugin_thdvar_cleanup(THD *thd)
|
|||
variables are no longer accessible and the value space is lost. Note
|
||||
that only string values with PLUGIN_VAR_MEMALLOC are allocated and
|
||||
must be freed.
|
||||
|
||||
@param[in] vars Chain of system variables of a plugin
|
||||
*/
|
||||
|
||||
static void plugin_vars_free_values(sys_var *vars)
|
||||
static void plugin_vars_free_values(st_mysql_sys_var **vars)
|
||||
{
|
||||
DBUG_ENTER("plugin_vars_free_values");
|
||||
|
||||
for (sys_var *var= vars; var; var= var->next)
|
||||
if (!vars)
|
||||
DBUG_VOID_RETURN;
|
||||
|
||||
while(st_mysql_sys_var *var= *vars++)
|
||||
{
|
||||
sys_var_pluginvar *piv= var->cast_pluginvar();
|
||||
if (piv &&
|
||||
((piv->plugin_var->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR) &&
|
||||
(piv->plugin_var->flags & PLUGIN_VAR_MEMALLOC))
|
||||
if ((var->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR &&
|
||||
var->flags & PLUGIN_VAR_MEMALLOC)
|
||||
{
|
||||
/* Free the string from global_system_variables. */
|
||||
char **valptr= (char**) piv->real_value_ptr(NULL, OPT_GLOBAL);
|
||||
char **val;
|
||||
if (var->flags & PLUGIN_VAR_THDLOCAL)
|
||||
{
|
||||
st_bookmark *v= find_bookmark(0, var->name, var->flags);
|
||||
if (!v)
|
||||
continue;
|
||||
val= (char**)(global_system_variables.dynamic_variables_ptr + v->offset);
|
||||
}
|
||||
else
|
||||
val= *(char***) (var + 1);
|
||||
|
||||
DBUG_PRINT("plugin", ("freeing value for: '%s' addr: %p",
|
||||
var->name.str, valptr));
|
||||
my_free(*valptr);
|
||||
*valptr= NULL;
|
||||
var->name, val));
|
||||
my_free(*val);
|
||||
*val= NULL;
|
||||
}
|
||||
}
|
||||
DBUG_VOID_RETURN;
|
||||
|
@ -4035,7 +4043,7 @@ static my_option *construct_help_options(MEM_ROOT *mem_root,
|
|||
bzero(opts, sizeof(my_option) * count);
|
||||
|
||||
/**
|
||||
some plugin variables (those that don't have PLUGIN_VAR_NOSYSVAR flag)
|
||||
some plugin variables
|
||||
have their names prefixed with the plugin name. Restore the names here
|
||||
to get the correct (not double-prefixed) help text.
|
||||
We won't need @@sysvars anymore and don't care about their proper names.
|
||||
|
@ -4147,9 +4155,6 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
|
|||
char *varname;
|
||||
sys_var *v;
|
||||
|
||||
if (o->flags & PLUGIN_VAR_NOSYSVAR)
|
||||
continue;
|
||||
|
||||
tmp_backup[tmp->nbackups++].save(&o->name);
|
||||
if ((var= find_bookmark(tmp->name.str, o->name, o->flags)))
|
||||
{
|
||||
|
@ -4165,6 +4170,12 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
|
|||
my_casedn_str(&my_charset_latin1, varname);
|
||||
convert_dash_to_underscore(varname, len-1);
|
||||
}
|
||||
if (o->flags & PLUGIN_VAR_NOSYSVAR)
|
||||
{
|
||||
o->name= varname;
|
||||
continue;
|
||||
}
|
||||
|
||||
const char *s= o->flags & PLUGIN_VAR_DEPRECATED ? "" : NULL;
|
||||
v= new (mem_root) sys_var_pluginvar(&chain, varname, tmp, o, s);
|
||||
v->test_load= (var ? &var->loaded : &static_unload);
|
||||
|
|
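The rewritten plugin_vars_free_values() walks the plugin's null-terminated st_mysql_sys_var array and frees only string variables flagged as dynamically allocated. A stand-alone sketch of that traversal-and-free pattern with simplified flags and descriptors (not the real plugin API):

#include <cstdlib>

// Simplified stand-ins for the descriptors walked above.
enum { VAR_STR= 0x1, VAR_MEMALLOC= 0x2 };

struct ToyVar
{
  int    flags;
  char **value;                // storage that may hold a malloc'ed string
};

// Walk a null-terminated array of descriptors; free only values that are
// string-typed and dynamically allocated, then clear the pointer.
static void free_string_values(ToyVar **vars)
{
  if (!vars)
    return;
  while (ToyVar *var= *vars++)
  {
    if ((var->flags & VAR_STR) && (var->flags & VAR_MEMALLOC))
    {
      std::free(*var->value);
      *var->value= nullptr;
    }
  }
}

int main()
{
  char *val= static_cast<char*>(std::malloc(16));
  ToyVar v{VAR_STR | VAR_MEMALLOC, &val};
  ToyVar *list[]= {&v, nullptr};
  free_string_values(list);
  return val == nullptr ? 0 : 1;
}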
Some files were not shown because too many files have changed in this diff.