Mirror of https://github.com/MariaDB/server.git (synced 2025-04-18 21:25:32 +02:00)

commit 27eaa963ff
Merge 10.6 into 10.7

75 changed files with 1001 additions and 990 deletions
client
extra/mariabackup
include
man
mysql-test
  include
  main
    long_unique_bugs.result, long_unique_bugs.test, mysql_upgrade.result,
    order_by_innodb.result, order_by_innodb.test, range_innodb.result,
    range_innodb.test, rowid_filter_innodb_debug.result,
    rowid_filter_innodb_debug.test, rowid_filter_myisam_debug.result
  suite
    federated
    innodb
      r
        innodb-fkcheck.result, innodb.result, insert_into_empty.result,
        instant_alter_crash.result, truncate_foreign.result
      t
    innodb_fts
    innodb_gis
    innodb_zip
    mariabackup
mysys
sql
  derived_handler.cc, field.h, ha_partition.cc, ha_partition.h, handler.cc,
  handler.h, mysqld.cc, sql_derived.cc, sql_select.cc, sql_select.h,
  sql_table.cc, sql_type_geom.h
storage
  connect
  innobase
@@ -1748,7 +1748,8 @@ int cat_file(DYNAMIC_STRING* ds, const char* filename)
  len= (size_t) my_seek(fd, 0, SEEK_END, MYF(0));
  my_seek(fd, 0, SEEK_SET, MYF(0));
  if (len == (size_t)MY_FILEPOS_ERROR ||
- !(buff= (char*)my_malloc(PSI_NOT_INSTRUMENTED, len + 1, MYF(0))))
+ !(buff= (char*)my_malloc(PSI_NOT_INSTRUMENTED, len + 1,
+ MYF(MY_WME|MY_FAE))))
  {
  my_close(fd, MYF(0));
  return 1;

@@ -2407,7 +2408,7 @@ VAR *var_init(VAR *v, const char *name, size_t name_len, const char *val, size_t
  val_len= 0;
  val_alloc_len = val_len + 16; /* room to grow */
  if (!(tmp_var=v) && !(tmp_var = (VAR*)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(*tmp_var)
- + name_len+2, MYF(MY_WME))))
+ + name_len+2, MYF(MY_WME|MY_FAE))))
  die("Out of memory");

  if (name != NULL)

@@ -2421,7 +2422,8 @@ VAR *var_init(VAR *v, const char *name, size_t name_len, const char *val, size_t

  tmp_var->alloced = (v == 0);

- if (!(tmp_var->str_val = (char*)my_malloc(PSI_NOT_INSTRUMENTED, val_alloc_len+1, MYF(MY_WME))))
+ if (!(tmp_var->str_val = (char*)my_malloc(PSI_NOT_INSTRUMENTED,
+ val_alloc_len+1, MYF(MY_WME|MY_FAE))))
  die("Out of memory");

  if (val)

@@ -2969,8 +2971,10 @@ void var_copy(VAR *dest, VAR *src)
  /* Alloc/realloc data for str_val in dest */
  if (dest->alloced_len < src->alloced_len &&
  !(dest->str_val= dest->str_val
- ? (char*)my_realloc(PSI_NOT_INSTRUMENTED, dest->str_val, src->alloced_len, MYF(MY_WME))
- : (char*)my_malloc(PSI_NOT_INSTRUMENTED, src->alloced_len, MYF(MY_WME))))
+ ? (char*)my_realloc(PSI_NOT_INSTRUMENTED, dest->str_val, src->alloced_len,
+ MYF(MY_WME|MY_FAE))
+ : (char*)my_malloc(PSI_NOT_INSTRUMENTED, src->alloced_len,
+ MYF(MY_WME|MY_FAE))))
  die("Out of memory");
  else
  dest->alloced_len= src->alloced_len;
@@ -3047,8 +3051,10 @@ void eval_expr(VAR *v, const char *p, const char **p_end,
  MIN_VAR_ALLOC : new_val_len + 1;
  if (!(v->str_val =
  v->str_val ?
- (char*)my_realloc(PSI_NOT_INSTRUMENTED, v->str_val, v->alloced_len+1, MYF(MY_WME)) :
- (char*)my_malloc(PSI_NOT_INSTRUMENTED, v->alloced_len+1, MYF(MY_WME))))
+ (char*)my_realloc(PSI_NOT_INSTRUMENTED, v->str_val, v->alloced_len+1,
+ MYF(MY_WME|MY_FAE)) :
+ (char*)my_malloc(PSI_NOT_INSTRUMENTED, v->alloced_len+1,
+ MYF(MY_WME|MY_FAE))))
  die("Out of memory");
  }
  v->str_val_len = new_val_len;

@@ -4783,7 +4789,8 @@ void do_sync_with_master(struct st_command *command)
  p++;
  while (*p && my_isspace(charset_info, *p))
  p++;
- start= buff= (char*)my_malloc(PSI_NOT_INSTRUMENTED, strlen(p)+1,MYF(MY_WME | MY_FAE));
+ start= buff= (char*)my_malloc(PSI_NOT_INSTRUMENTED, strlen(p)+1,
+ MYF(MY_WME|MY_FAE));
  get_string(&buff, &p, command);
  }
  command->last_argument= p;

@@ -6923,7 +6930,7 @@ int read_command(struct st_command** command_ptr)
  }
  if (!(*command_ptr= command=
  (struct st_command*) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(*command),
- MYF(MY_WME|MY_ZEROFILL))) ||
+ MYF(MY_WME|MY_FAE|MY_ZEROFILL))) ||
  insert_dynamic(&q_lines, &command))
  die("Out of memory");
  command->type= Q_UNKNOWN;

@@ -7631,18 +7638,19 @@ void append_stmt_result(DYNAMIC_STRING *ds, MYSQL_STMT *stmt,

  /* Allocate array with bind structs, lengths and NULL flags */
  my_bind= (MYSQL_BIND*) my_malloc(PSI_NOT_INSTRUMENTED, num_fields * sizeof(MYSQL_BIND),
- MYF(MY_WME | MY_FAE | MY_ZEROFILL));
+ MYF(MY_WME|MY_FAE|MY_ZEROFILL));
  length= (ulong*) my_malloc(PSI_NOT_INSTRUMENTED, num_fields * sizeof(ulong),
- MYF(MY_WME | MY_FAE));
+ MYF(MY_WME|MY_FAE));
  is_null= (my_bool*) my_malloc(PSI_NOT_INSTRUMENTED, num_fields * sizeof(my_bool),
- MYF(MY_WME | MY_FAE));
+ MYF(MY_WME|MY_FAE));

  /* Allocate data for the result of each field */
  for (i= 0; i < num_fields; i++)
  {
  uint max_length= fields[i].max_length + 1;
  my_bind[i].buffer_type= MYSQL_TYPE_STRING;
- my_bind[i].buffer= my_malloc(PSI_NOT_INSTRUMENTED, max_length, MYF(MY_WME | MY_FAE));
+ my_bind[i].buffer= my_malloc(PSI_NOT_INSTRUMENTED, max_length,
+ MYF(MY_WME|MY_FAE));
  my_bind[i].buffer_length= max_length;
  my_bind[i].is_null= &is_null[i];
  my_bind[i].length= &length[i];
@@ -8666,7 +8674,7 @@ void run_bind_stmt(struct st_connection *cn, struct st_command *command,
  cn->ps_params= ps_params = (MYSQL_BIND*)my_malloc(PSI_NOT_INSTRUMENTED,
  sizeof(MYSQL_BIND) *
  stmt->param_count,
- MYF(MY_WME));
+ MYF(MY_WME|MY_FAE));
  bzero((char *) ps_params, sizeof(MYSQL_BIND) * stmt->param_count);

  int i=0;

@@ -8681,7 +8689,8 @@ void run_bind_stmt(struct st_connection *cn, struct st_command *command,
  if (!*c)
  {
  ps_params[i].buffer_type= MYSQL_TYPE_LONG;
- l= (long*)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(long), MYF(MY_WME));
+ l= (long*)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(long),
+ MYF(MY_WME|MY_FAE));
  *l= strtol(p, &c, 10);
  ps_params[i].buffer= (void*)l;
  ps_params[i].buffer_length= 8;

@@ -8693,7 +8702,7 @@ void run_bind_stmt(struct st_connection *cn, struct st_command *command,
  {
  ps_params[i].buffer_type= MYSQL_TYPE_DECIMAL;
  d= (double*)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(double),
- MYF(MY_WME));
+ MYF(MY_WME|MY_FAE));
  *d= strtod(p, &c);
  ps_params[i].buffer= (void*)d;
  ps_params[i].buffer_length= 8;

@@ -8701,7 +8710,8 @@ void run_bind_stmt(struct st_connection *cn, struct st_command *command,
  else
  {
  ps_params[i].buffer_type= MYSQL_TYPE_STRING;
- ps_params[i].buffer= my_strdup(PSI_NOT_INSTRUMENTED, p, MYF(MY_WME));
+ ps_params[i].buffer= my_strdup(PSI_NOT_INSTRUMENTED, p,
+ MYF(MY_WME|MY_FAE));
  ps_params[i].buffer_length= (unsigned long)strlen(p);
  }
  }
@@ -9739,7 +9749,7 @@ int main(int argc, char **argv)
  /* Init connections, allocate 1 extra as buffer + 1 for default */
  connections= (struct st_connection*)
  my_malloc(PSI_NOT_INSTRUMENTED, (opt_max_connections+2) * sizeof(struct st_connection),
- MYF(MY_WME | MY_ZEROFILL));
+ MYF(MY_WME|MY_FAE|MY_ZEROFILL));
  connections_end= connections + opt_max_connections +1;
  next_con= connections + 1;

@@ -10461,7 +10471,8 @@ void do_get_replace_column(struct st_command *command)
  die("Missing argument in %s", command->query);

  /* Allocate a buffer for results */
- start= buff= (char*)my_malloc(PSI_NOT_INSTRUMENTED, strlen(from)+1,MYF(MY_WME | MY_FAE));
+ start= buff= (char*)my_malloc(PSI_NOT_INSTRUMENTED, strlen(from)+1,
+ MYF(MY_WME|MY_FAE));
  while (*from)
  {
  char *to;

@@ -10474,7 +10485,8 @@ void do_get_replace_column(struct st_command *command)
  command->query);
  to= get_string(&buff, &from, command);
  my_free(replace_column[column_number-1]);
- replace_column[column_number-1]= my_strdup(PSI_NOT_INSTRUMENTED, to, MYF(MY_WME | MY_FAE));
+ replace_column[column_number-1]= my_strdup(PSI_NOT_INSTRUMENTED, to,
+ MYF(MY_WME|MY_FAE));
  set_if_bigger(max_replace_column, column_number);
  }
  my_free(start);

@@ -10541,7 +10553,8 @@ void do_get_replace(struct st_command *command)
  bzero(&from_array,sizeof(from_array));
  if (!*from)
  die("Missing argument in %s", command->query);
- start= buff= (char*)my_malloc(PSI_NOT_INSTRUMENTED, strlen(from)+1,MYF(MY_WME | MY_FAE));
+ start= buff= (char*)my_malloc(PSI_NOT_INSTRUMENTED, strlen(from)+1,
+ MYF(MY_WME|MY_FAE));
  while (*from)
  {
  char *to= buff;
@@ -11199,7 +11212,7 @@ REPLACE *init_replace(char * *from, char * *to,uint count,
  DBUG_RETURN(0);
  found_sets=0;
  if (!(found_set= (FOUND_SET*) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(FOUND_SET)*max_length*count,
- MYF(MY_WME))))
+ MYF(MY_WME|MY_FAE))))
  {
  free_sets(&sets);
  DBUG_RETURN(0);

@@ -11209,7 +11222,7 @@ REPLACE *init_replace(char * *from, char * *to,uint count,
  used_sets=-1;
  word_states=make_new_set(&sets); /* Start of new word */
  start_states=make_new_set(&sets); /* This is first state */
- if (!(follow=(FOLLOWS*) my_malloc(PSI_NOT_INSTRUMENTED, (states+2)*sizeof(FOLLOWS),MYF(MY_WME))))
+ if (!(follow=(FOLLOWS*) my_malloc(PSI_NOT_INSTRUMENTED, (states+2)*sizeof(FOLLOWS),MYF(MY_WME|MY_FAE))))
  {
  free_sets(&sets);
  my_free(found_set);

@@ -11376,7 +11389,7 @@ REPLACE *init_replace(char * *from, char * *to,uint count,
  if ((replace=(REPLACE*) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(REPLACE)*(sets.count)+
  sizeof(REPLACE_STRING)*(found_sets+1)+
  sizeof(char *)*count+result_len,
- MYF(MY_WME | MY_ZEROFILL))))
+ MYF(MY_WME|MY_FAE|MY_ZEROFILL))))
  {
  rep_str=(REPLACE_STRING*) (replace+sets.count);
  to_array= (char **) (rep_str+found_sets+1);

@@ -11419,10 +11432,10 @@ int init_sets(REP_SETS *sets,uint states)
  bzero(sets, sizeof(*sets));
  sets->size_of_bits=((states+7)/8);
  if (!(sets->set_buffer=(REP_SET*) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(REP_SET)*SET_MALLOC_HUNC,
- MYF(MY_WME))))
+ MYF(MY_WME|MY_FAE))))
  return 1;
  if (!(sets->bit_buffer=(uint*) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(uint)*sets->size_of_bits*
- SET_MALLOC_HUNC,MYF(MY_WME))))
+ SET_MALLOC_HUNC,MYF(MY_WME|MY_FAE))))
  {
  my_free(sets->set);
  return 1;

@@ -11618,10 +11631,10 @@ int insert_pointer_name(POINTER_ARRAY *pa,char * name)
  if (!(pa->typelib.type_names=(const char **)
  my_malloc(PSI_NOT_INSTRUMENTED, ((PC_MALLOC-MALLOC_OVERHEAD)/
  (sizeof(char *)+sizeof(*pa->flag))*
- (sizeof(char *)+sizeof(*pa->flag))),MYF(MY_WME))))
+ (sizeof(char *)+sizeof(*pa->flag))),MYF(MY_WME|MY_FAE))))
  DBUG_RETURN(-1);
  if (!(pa->str= (uchar*) my_malloc(PSI_NOT_INSTRUMENTED, PS_MALLOC - MALLOC_OVERHEAD,
- MYF(MY_WME))))
+ MYF(MY_WME|MY_FAE))))
  {
  my_free(pa->typelib.type_names);
  DBUG_RETURN (-1);
@@ -3818,10 +3818,6 @@ static dberr_t xb_assign_undo_space_start()
  uint32_t fsp_flags;
  int n_retries = 5;

- if (srv_undo_tablespaces == 0) {
- return error;
- }
-
  file = os_file_create(0, srv_sys_space.first_datafile()->filepath(),
  OS_FILE_OPEN, OS_FILE_NORMAL, OS_DATA_FILE, true, &ret);
@@ -31,7 +31,10 @@ extern ulong my_time_to_wait_for_lock;
  #include <signal.h>
  #ifdef HAVE_SIGHANDLER_T
  #define sig_return sighandler_t
- #elif defined(SOLARIS) || defined(__sun) || defined(__APPLE__) || defined(__FreeBSD__) || defined(_AIX)
+ #elif defined(SOLARIS) || defined(__sun) || defined(__APPLE__) || \
+ defined(_AIX) || \
+ defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
+ defined(__DragonFly__)
  typedef void (*sig_return)(int); /* Returns type from signal */
  #else
  typedef void (*sig_return)(void); /* Returns type from signal */
@@ -340,7 +340,9 @@ program to set the server\'s scheduling priority to the given value\&.
  .\}
  .\" mysqld_safe: no-auto-restart option
  .\" no-auto-restart option: mysqld_safe
- \fB\-\-no\-auto\-restart\fR
+ \fB\-\-no\-auto\-restart\fR,
+ \fB\-\-nowatch\fR,
+ \fB\-\-no\-watch\fR
  .sp
  Exit after starting mysqld\&.
  .RE

@@ -368,21 +370,6 @@ Do not read any option files\&. This must be the first option on the command lin
  .sp -1
  .IP \(bu 2.3
  .\}
- .\" mysqld_safe: no-watch option
- .\" no-watch option: mysqld_safe
- \fB\-\-no\-auto\-restart\fR
- .sp
- Exit after starting mysqld\&.
- .RE
- .sp
- .RS 4
- .ie n \{\
- \h'-04'\(bu\h'+03'\c
- .\}
- .el \{\
- .sp -1
- .IP \(bu 2.3
- .\}
  .\" mysqld_safe: numa-interleave option
  .\" numa-interleave option: mysqld_safe
  \fB\-\-numa\-interleave\fR
@ -1,17 +1,14 @@
|
|||
--source include/have_debug.inc
|
||||
--source include/have_debug_sync.inc
|
||||
--source include/have_sequence.inc
|
||||
--source include/count_sessions.inc
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-22761 KILL QUERY during rowid_filter, crashes
|
||||
--echo #
|
||||
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
|
||||
# 100 rows
|
||||
create table t2(a int);
|
||||
insert into t2 select A.a + B.a* 10 from t0 A, t0 B;
|
||||
insert into t2 select * from seq_0_to_99;
|
||||
|
||||
# 10K rows
|
||||
CREATE TABLE t3 (
|
||||
|
@ -26,11 +23,10 @@ where table_schema=database() and table_name='t3';
|
|||
|
||||
insert into t3
|
||||
select
|
||||
A.a,
|
||||
B.a,
|
||||
A.seq,
|
||||
B.seq,
|
||||
'filler-data-filler-data'
|
||||
from
|
||||
t2 A, t2 B;
|
||||
from seq_0_to_99 A, seq_0_to_99 B;
|
||||
|
||||
analyze table t2,t3;
|
||||
|
||||
|
@ -48,7 +44,6 @@ where
|
|||
t3.key1=t2.a and t3.key2 in (2,3);
|
||||
|
||||
connect (con1, localhost, root,,);
|
||||
connection con1;
|
||||
set debug_sync='now WAIT_FOR at_rowid_filter_check';
|
||||
evalp kill query $target_id;
|
||||
set debug_sync='now SIGNAL go';
|
||||
|
@ -60,6 +55,5 @@ disconnect con1;
|
|||
reap;
|
||||
set debug_sync='RESET';
|
||||
|
||||
drop table t0,t2,t3;
|
||||
drop table t2,t3;
|
||||
--source include/wait_until_count_sessions.inc
|
||||
|
||||
|
|
|
@ -317,6 +317,15 @@ ERROR 23000: Duplicate entry '1' for key 'v2'
|
|||
update t1,t2 set v1 = v2 , v5 = 0;
|
||||
ERROR 23000: Duplicate entry '-128' for key 'v1'
|
||||
drop table t1, t2;
|
||||
CREATE TABLE t1 (f TEXT UNIQUE);
|
||||
INSERT INTO t1 VALUES (NULL),(NULL);
|
||||
UPDATE t1 SET f = '';
|
||||
ERROR 23000: Duplicate entry '' for key 'f'
|
||||
SELECT * FROM t1;
|
||||
f
|
||||
|
||||
NULL
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-21540 Initialization of already inited long unique index on reorganize partition
|
||||
#
|
||||
|
|
|
@ -397,6 +397,17 @@ update t1 set v2 = 1, v3 = -128;
|
|||
update t1,t2 set v1 = v2 , v5 = 0;
|
||||
drop table t1, t2;
|
||||
|
||||
#
|
||||
# MDEV-23264 Unique blobs allow duplicate values upon UPDATE
|
||||
#
|
||||
|
||||
CREATE TABLE t1 (f TEXT UNIQUE);
|
||||
INSERT INTO t1 VALUES (NULL),(NULL);
|
||||
--error ER_DUP_ENTRY
|
||||
UPDATE t1 SET f = '';
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-21540 Initialization of already inited long unique index on reorganize partition
|
||||
--echo #
|
||||
|
|
|
@ -1319,10 +1319,6 @@ partition p2008 values less than (2009)
|
|||
);
|
||||
select length(table_name) from mysql.innodb_table_stats;
|
||||
length(table_name)
|
||||
79
|
||||
79
|
||||
79
|
||||
79
|
||||
drop table extralongname_extralongname_extralongname_extralongname_ext;
|
||||
# End of 10.0 tests
|
||||
set sql_mode=default;
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
drop table if exists t0,t1,t2,t3;
|
||||
#
|
||||
# MDEV-6434: Wrong result (extra rows) with ORDER BY, multiple-column index, InnoDB
|
||||
#
|
||||
CREATE TABLE t1 (a INT, b INT, c INT, d TEXT, KEY idx(a,b,c)) ENGINE=InnoDB;
|
||||
CREATE TABLE t1 (a INT, b INT, c INT, d TEXT, KEY idx(a,b,c)) ENGINE=InnoDB
|
||||
STATS_PERSISTENT=0;
|
||||
INSERT INTO t1 (a,c) VALUES
|
||||
(8, 9),(8, 10),(13, 15),(16, 17),(16, 18),(16, 19),(20, 21),
|
||||
(20, 22),(20, 24),(20, 25),(20, 26),(20, 27),(20, 28);
|
||||
|
@ -14,8 +14,6 @@ DROP TABLE t1;
|
|||
#
|
||||
# MDEV-9457: Poor query plan chosen for ORDER BY query by a recent 10.1
|
||||
#
|
||||
create table t0 (a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t1 (
|
||||
pk int primary key,
|
||||
key1 int,
|
||||
|
@ -23,15 +21,9 @@ key2 int,
|
|||
col1 char(255),
|
||||
key(key1),
|
||||
key(key2)
|
||||
) engine=innodb;
|
||||
set @a=-1;
|
||||
) engine=innodb stats_persistent=0;
|
||||
insert into t1
|
||||
select
|
||||
@a:=@a+1,
|
||||
@a,
|
||||
@a,
|
||||
repeat('abcd', 63)
|
||||
from t0 A, t0 B, t0 C, t0 D;
|
||||
select seq,seq,seq,repeat('abcd', 63) from seq_0_to_9999;
|
||||
# The following must NOT use 'index' on PK.
|
||||
# It should use index_merge(key1,key2) + filesort
|
||||
explain
|
||||
|
@ -47,7 +39,7 @@ from t1
|
|||
where key1<3 or key2<3;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL # Using sort_union(key1,key2); Using where
|
||||
drop table t0, t1;
|
||||
drop table t1;
|
||||
#
|
||||
# MDEV-18094: Query with order by limit picking index scan over filesort
|
||||
#
|
||||
|
@ -78,9 +70,12 @@ drop table t1,t0;
|
|||
# MDEV-14071: wrong results with orderby_uses_equalities=on
|
||||
# (duplicate of MDEV-13994)
|
||||
#
|
||||
CREATE TABLE t1 (i int, j int, z int,PRIMARY KEY (i,j), KEY (z)) ENGINE=InnoDB;
|
||||
CREATE TABLE t2 (i int, j int, PRIMARY KEY (i,j)) ENGINE=InnoDB;
|
||||
CREATE TABLE t3 (j int, n varchar(5), PRIMARY KEY (j)) ENGINE=InnoDB;
|
||||
CREATE TABLE t1 (i int, j int, z int,PRIMARY KEY (i,j), KEY (z)) ENGINE=InnoDB
|
||||
STATS_PERSISTENT=0;
|
||||
CREATE TABLE t2 (i int, j int, PRIMARY KEY (i,j)) ENGINE=InnoDB
|
||||
STATS_PERSISTENT=0;
|
||||
CREATE TABLE t3 (j int, n varchar(5), PRIMARY KEY (j)) ENGINE=InnoDB
|
||||
STATS_PERSISTENT=0;
|
||||
INSERT INTO t1 VALUES
|
||||
(127,0,1),(188,0,1),(206,0,1),(218,0,1),(292,0,1),(338,0,1),(375,0,1),
|
||||
(381,0,1),(409,0,1),(466,0,1),(469,0,1),(498,0,1),(656,0,1);
|
||||
|
@ -150,7 +145,8 @@ DROP TABLE t1,t2,t3;
|
|||
#
|
||||
# MDEV-25858: Query results are incorrect when indexes are added
|
||||
#
|
||||
CREATE TABLE t1 (id int NOT NULL PRIMARY KEY) engine=innodb;
|
||||
CREATE TABLE t1 (id int NOT NULL PRIMARY KEY) engine=innodb
|
||||
STATS_PERSISTENT=0;
|
||||
insert into t1 values (1),(2),(3);
|
||||
CREATE TABLE t2 (
|
||||
id int NOT NULL PRIMARY KEY,
|
||||
|
|
|
@ -2,16 +2,14 @@
|
|||
# ORDER BY handling (e.g. filesort) tests that require innodb
|
||||
#
|
||||
-- source include/have_innodb.inc
|
||||
|
||||
--disable_warnings
|
||||
drop table if exists t0,t1,t2,t3;
|
||||
--enable_warnings
|
||||
-- source include/have_sequence.inc
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-6434: Wrong result (extra rows) with ORDER BY, multiple-column index, InnoDB
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (a INT, b INT, c INT, d TEXT, KEY idx(a,b,c)) ENGINE=InnoDB;
|
||||
CREATE TABLE t1 (a INT, b INT, c INT, d TEXT, KEY idx(a,b,c)) ENGINE=InnoDB
|
||||
STATS_PERSISTENT=0;
|
||||
|
||||
INSERT INTO t1 (a,c) VALUES
|
||||
(8, 9),(8, 10),(13, 15),(16, 17),(16, 18),(16, 19),(20, 21),
|
||||
|
@ -24,9 +22,6 @@ DROP TABLE t1;
|
|||
--echo #
|
||||
--echo # MDEV-9457: Poor query plan chosen for ORDER BY query by a recent 10.1
|
||||
--echo #
|
||||
create table t0 (a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
|
||||
create table t1 (
|
||||
pk int primary key,
|
||||
key1 int,
|
||||
|
@ -34,16 +29,10 @@ create table t1 (
|
|||
col1 char(255),
|
||||
key(key1),
|
||||
key(key2)
|
||||
) engine=innodb;
|
||||
) engine=innodb stats_persistent=0;
|
||||
|
||||
set @a=-1;
|
||||
insert into t1
|
||||
select
|
||||
@a:=@a+1,
|
||||
@a,
|
||||
@a,
|
||||
repeat('abcd', 63)
|
||||
from t0 A, t0 B, t0 C, t0 D;
|
||||
select seq,seq,seq,repeat('abcd', 63) from seq_0_to_9999;
|
||||
|
||||
--echo # The following must NOT use 'index' on PK.
|
||||
--echo # It should use index_merge(key1,key2) + filesort
|
||||
|
@ -60,7 +49,7 @@ select *
|
|||
from t1
|
||||
where key1<3 or key2<3;
|
||||
|
||||
drop table t0, t1;
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-18094: Query with order by limit picking index scan over filesort
|
||||
|
@ -93,9 +82,12 @@ drop table t1,t0;
|
|||
--echo # (duplicate of MDEV-13994)
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (i int, j int, z int,PRIMARY KEY (i,j), KEY (z)) ENGINE=InnoDB;
|
||||
CREATE TABLE t2 (i int, j int, PRIMARY KEY (i,j)) ENGINE=InnoDB;
|
||||
CREATE TABLE t3 (j int, n varchar(5), PRIMARY KEY (j)) ENGINE=InnoDB;
|
||||
CREATE TABLE t1 (i int, j int, z int,PRIMARY KEY (i,j), KEY (z)) ENGINE=InnoDB
|
||||
STATS_PERSISTENT=0;
|
||||
CREATE TABLE t2 (i int, j int, PRIMARY KEY (i,j)) ENGINE=InnoDB
|
||||
STATS_PERSISTENT=0;
|
||||
CREATE TABLE t3 (j int, n varchar(5), PRIMARY KEY (j)) ENGINE=InnoDB
|
||||
STATS_PERSISTENT=0;
|
||||
|
||||
INSERT INTO t1 VALUES
|
||||
(127,0,1),(188,0,1),(206,0,1),(218,0,1),(292,0,1),(338,0,1),(375,0,1),
|
||||
|
@ -139,7 +131,8 @@ DROP TABLE t1,t2,t3;
|
|||
--echo # MDEV-25858: Query results are incorrect when indexes are added
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (id int NOT NULL PRIMARY KEY) engine=innodb;
|
||||
CREATE TABLE t1 (id int NOT NULL PRIMARY KEY) engine=innodb
|
||||
STATS_PERSISTENT=0;
|
||||
insert into t1 values (1),(2),(3);
|
||||
|
||||
CREATE TABLE t2 (
|
||||
|
|
|
@ -1,15 +1,11 @@
|
|||
#
|
||||
# Range optimizer (and related) tests that need InnoDB.
|
||||
#
|
||||
drop table if exists t0, t1, t2;
|
||||
#
|
||||
# MDEV-6735: Range checked for each record used with key
|
||||
#
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t1(a int);
|
||||
insert into t1 select A.a + B.a* 10 + C.a * 100 + D.a * 1000
|
||||
from t0 A, t0 B, t0 C, t0 D;
|
||||
create table t2 (
|
||||
a int,
|
||||
b int,
|
||||
|
@ -22,12 +18,12 @@ key(b)
|
|||
) engine=innodb;
|
||||
insert into t2
|
||||
select
|
||||
a,a,
|
||||
seq,seq,
|
||||
repeat('0123456789', 10),
|
||||
repeat('0123456789', 10),
|
||||
repeat('0123456789', 10),
|
||||
repeat('0123456789', 10)
|
||||
from t1;
|
||||
from seq_0_to_9999;
|
||||
analyze table t2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t2 analyze status Engine-independent statistics collected
|
||||
|
@ -37,7 +33,7 @@ explain select * from t0 left join t2 on t2.a <t0.a and t2.b between 50 and 250;
|
|||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t0 ALL NULL NULL NULL NULL 10
|
||||
1 SIMPLE t2 range a,b b 5 NULL 201 Using where; Using join buffer (flat, BNL join)
|
||||
drop table t0,t1,t2;
|
||||
drop table t0,t2;
|
||||
#
|
||||
# MDEV-10466: constructing an invalid SEL_ARG
|
||||
#
|
||||
|
@ -89,15 +85,14 @@ drop table t1,t2;
|
|||
#
|
||||
set @optimizer_switch_save= @@optimizer_switch;
|
||||
set optimizer_switch='index_merge_sort_intersection=off';
|
||||
create table t0 (a int)engine=innodb;
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t1 (
|
||||
a int, b int, c int,
|
||||
key(a),key(b),key(c)
|
||||
)engine=innodb;
|
||||
insert into t1
|
||||
select A.a+10*B.a, A.a+10*B.a, A.a+10*B.a+100*C.a
|
||||
from t0 A, t0 B, t0 C, t0 D where D.a<5;
|
||||
select a.seq/10, a.seq/10, a.seq from seq_0_to_499 a, seq_0_to_4 b;
|
||||
SET @saved_dbug = @@GLOBAL.debug_dbug;
|
||||
set @@global.debug_dbug="+d,ha_index_init_fail";
|
||||
explain select * from t1 where a=10 and b=10;
|
||||
|
|
|
@ -4,12 +4,9 @@
|
|||
|
||||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
--source include/have_sequence.inc
|
||||
--source include/no_valgrind_without_big.inc
|
||||
|
||||
--disable_warnings
|
||||
drop table if exists t0, t1, t2;
|
||||
--enable_warnings
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-6735: Range checked for each record used with key
|
||||
--echo #
|
||||
|
@ -17,10 +14,6 @@ drop table if exists t0, t1, t2;
|
|||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
|
||||
create table t1(a int);
|
||||
insert into t1 select A.a + B.a* 10 + C.a * 100 + D.a * 1000
|
||||
from t0 A, t0 B, t0 C, t0 D;
|
||||
|
||||
create table t2 (
|
||||
a int,
|
||||
b int,
|
||||
|
@ -34,18 +27,18 @@ create table t2 (
|
|||
|
||||
insert into t2
|
||||
select
|
||||
a,a,
|
||||
seq,seq,
|
||||
repeat('0123456789', 10),
|
||||
repeat('0123456789', 10),
|
||||
repeat('0123456789', 10),
|
||||
repeat('0123456789', 10)
|
||||
from t1;
|
||||
from seq_0_to_9999;
|
||||
|
||||
analyze table t2;
|
||||
--echo # The following must not use "Range checked for each record":
|
||||
explain select * from t0 left join t2 on t2.a <t0.a and t2.b between 50 and 250;
|
||||
|
||||
drop table t0,t1,t2;
|
||||
drop table t0,t2;
|
||||
|
||||
|
||||
--echo #
|
||||
|
@ -98,15 +91,14 @@ drop table t1,t2;
|
|||
|
||||
set @optimizer_switch_save= @@optimizer_switch;
|
||||
set optimizer_switch='index_merge_sort_intersection=off';
|
||||
create table t0 (a int)engine=innodb;
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t1 (
|
||||
a int, b int, c int,
|
||||
key(a),key(b),key(c)
|
||||
)engine=innodb;
|
||||
insert into t1
|
||||
select A.a+10*B.a, A.a+10*B.a, A.a+10*B.a+100*C.a
|
||||
from t0 A, t0 B, t0 C, t0 D where D.a<5;
|
||||
select a.seq/10, a.seq/10, a.seq from seq_0_to_499 a, seq_0_to_4 b;
|
||||
SET @saved_dbug = @@GLOBAL.debug_dbug;
|
||||
set @@global.debug_dbug="+d,ha_index_init_fail";
|
||||
explain select * from t1 where a=10 and b=10;
|
||||
|
@ -122,8 +114,6 @@ set @@optimizer_switch= @optimizer_switch_save;
|
|||
--echo # MDEV-27262: Index intersection with full scan over an index
|
||||
--echo #
|
||||
|
||||
--source include/have_sequence.inc
|
||||
|
||||
CREATE TABLE t1 (
|
||||
id int(10) unsigned NOT NULL AUTO_INCREMENT,
|
||||
p char(32) DEFAULT NULL,
|
||||
|
|
|
@ -2,10 +2,8 @@ set default_storage_engine=innodb;
|
|||
#
|
||||
# MDEV-22761 KILL QUERY during rowid_filter, crashes
|
||||
#
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t2(a int);
|
||||
insert into t2 select A.a + B.a* 10 from t0 A, t0 B;
|
||||
insert into t2 select * from seq_0_to_99;
|
||||
CREATE TABLE t3 (
|
||||
key1 int ,
|
||||
key2 int,
|
||||
|
@ -19,11 +17,10 @@ engine
|
|||
InnoDB
|
||||
insert into t3
|
||||
select
|
||||
A.a,
|
||||
B.a,
|
||||
A.seq,
|
||||
B.seq,
|
||||
'filler-data-filler-data'
|
||||
from
|
||||
t2 A, t2 B;
|
||||
from seq_0_to_99 A, seq_0_to_99 B;
|
||||
analyze table t2,t3;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t2 analyze status Engine-independent statistics collected
|
||||
|
@ -42,7 +39,6 @@ select * from t2, t3
|
|||
where
|
||||
t3.key1=t2.a and t3.key2 in (2,3);
|
||||
connect con1, localhost, root,,;
|
||||
connection con1;
|
||||
set debug_sync='now WAIT_FOR at_rowid_filter_check';
|
||||
kill query $target_id;
|
||||
set debug_sync='now SIGNAL go';
|
||||
|
@ -50,7 +46,7 @@ connection default;
|
|||
disconnect con1;
|
||||
ERROR 70100: Query execution was interrupted
|
||||
set debug_sync='RESET';
|
||||
drop table t0,t2,t3;
|
||||
drop table t2,t3;
|
||||
set default_storage_engine=default;
|
||||
set @save_optimizer_switch= @@optimizer_switch;
|
||||
set @save_use_stat_tables= @@use_stat_tables;
|
||||
|
@ -67,7 +63,6 @@ INSERT INTO t1 VALUES (0,0),(1,0),(-1,1), (-2,1), (-2,3), (-3,4), (-2,4);
|
|||
set debug_sync='handler_rowid_filter_check SIGNAL killme WAIT_FOR go';
|
||||
SELECT * FROM t1 WHERE a > 0 AND b=0;
|
||||
connect con1, localhost, root,,;
|
||||
connection con1;
|
||||
set debug_sync='now WAIT_FOR killme';
|
||||
kill query @id;
|
||||
set debug_sync='now SIGNAL go';
|
||||
|
|
|
@ -31,7 +31,6 @@ set debug_sync='handler_rowid_filter_check SIGNAL killme WAIT_FOR go';
|
|||
send SELECT * FROM t1 WHERE a > 0 AND b=0;
|
||||
|
||||
connect (con1, localhost, root,,);
|
||||
connection con1;
|
||||
let $ignore= `SELECT @id := $ID`;
|
||||
set debug_sync='now WAIT_FOR killme';
|
||||
kill query @id;
|
||||
|
|
|
@ -1,10 +1,8 @@
|
|||
#
|
||||
# MDEV-22761 KILL QUERY during rowid_filter, crashes
|
||||
#
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t2(a int);
|
||||
insert into t2 select A.a + B.a* 10 from t0 A, t0 B;
|
||||
insert into t2 select * from seq_0_to_99;
|
||||
CREATE TABLE t3 (
|
||||
key1 int ,
|
||||
key2 int,
|
||||
|
@ -18,11 +16,10 @@ engine
|
|||
MyISAM
|
||||
insert into t3
|
||||
select
|
||||
A.a,
|
||||
B.a,
|
||||
A.seq,
|
||||
B.seq,
|
||||
'filler-data-filler-data'
|
||||
from
|
||||
t2 A, t2 B;
|
||||
from seq_0_to_99 A, seq_0_to_99 B;
|
||||
analyze table t2,t3;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t2 analyze status Engine-independent statistics collected
|
||||
|
@ -41,7 +38,6 @@ select * from t2, t3
|
|||
where
|
||||
t3.key1=t2.a and t3.key2 in (2,3);
|
||||
connect con1, localhost, root,,;
|
||||
connection con1;
|
||||
set debug_sync='now WAIT_FOR at_rowid_filter_check';
|
||||
kill query $target_id;
|
||||
set debug_sync='now SIGNAL go';
|
||||
|
@ -49,4 +45,4 @@ connection default;
|
|||
disconnect con1;
|
||||
ERROR 70100: Query execution was interrupted
|
||||
set debug_sync='RESET';
|
||||
drop table t0,t2,t3;
|
||||
drop table t2,t3;
|
||||
|
|
|
@ -471,6 +471,51 @@ a
|
|||
1
|
||||
2
|
||||
3
|
||||
#
|
||||
# MDEV-29655: ASAN heap-use-after-free in
|
||||
# Pushdown_derived::Pushdown_derived
|
||||
#
|
||||
connection slave;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
CREATE TABLE federated.t1 (
|
||||
id int(20) NOT NULL,
|
||||
name varchar(16) NOT NULL default ''
|
||||
)
|
||||
DEFAULT CHARSET=latin1;
|
||||
INSERT INTO federated.t1 VALUES
|
||||
(3,'xxx'), (7,'yyy'), (4,'xxx'), (1,'zzz'), (5,'yyy');
|
||||
connection master;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
CREATE TABLE federated.t1 (
|
||||
id int(20) NOT NULL,
|
||||
name varchar(16) NOT NULL default ''
|
||||
)
|
||||
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
|
||||
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
|
||||
use federated;
|
||||
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=2) dt2) dt;
|
||||
id name
|
||||
connection slave;
|
||||
CREATE TABLE federated.t10 (a INT,b INT);
|
||||
CREATE TABLE federated.t11 (a INT, b INT);
|
||||
INSERT INTO federated.t10 VALUES (1,1),(2,2);
|
||||
INSERT INTO federated.t11 VALUES (1,1),(2,2);
|
||||
connection master;
|
||||
CREATE TABLE federated.t10
|
||||
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
|
||||
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t10';
|
||||
CREATE TABLE federated.t11
|
||||
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
|
||||
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t11';
|
||||
use federated;
|
||||
SELECT * FROM t10 LEFT JOIN
|
||||
(t11, (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=2) dt2) dt
|
||||
) ON t10.a=t11.a;
|
||||
a b a b id name
|
||||
1 1 NULL NULL NULL NULL
|
||||
2 2 NULL NULL NULL NULL
|
||||
set global federated_pushdown=0;
|
||||
connection master;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
|
|
|
@ -267,7 +267,6 @@ INSERT INTO federated.t2
|
|||
SELECT * FROM (SELECT * FROM federated.t1 LIMIT 70000) dt;
|
||||
SELECT COUNT(DISTINCT a) FROM federated.t2;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29640 FederatedX does not properly handle pushdown
|
||||
--echo # in case of difference in local and remote table names
|
||||
|
@ -314,6 +313,64 @@ CREATE TABLE federated.t3 (a INT)
|
|||
EXPLAIN SELECT * FROM federated.t3;
|
||||
SELECT * FROM federated.t3;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29655: ASAN heap-use-after-free in
|
||||
--echo # Pushdown_derived::Pushdown_derived
|
||||
--echo #
|
||||
|
||||
connection slave;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
|
||||
CREATE TABLE federated.t1 (
|
||||
id int(20) NOT NULL,
|
||||
name varchar(16) NOT NULL default ''
|
||||
)
|
||||
DEFAULT CHARSET=latin1;
|
||||
|
||||
INSERT INTO federated.t1 VALUES
|
||||
(3,'xxx'), (7,'yyy'), (4,'xxx'), (1,'zzz'), (5,'yyy');
|
||||
|
||||
connection master;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
|
||||
--replace_result $SLAVE_MYPORT SLAVE_PORT
|
||||
eval
|
||||
CREATE TABLE federated.t1 (
|
||||
id int(20) NOT NULL,
|
||||
name varchar(16) NOT NULL default ''
|
||||
)
|
||||
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
|
||||
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
|
||||
|
||||
use federated;
|
||||
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=2) dt2) dt;
|
||||
|
||||
connection slave;
|
||||
CREATE TABLE federated.t10 (a INT,b INT);
|
||||
CREATE TABLE federated.t11 (a INT, b INT);
|
||||
INSERT INTO federated.t10 VALUES (1,1),(2,2);
|
||||
INSERT INTO federated.t11 VALUES (1,1),(2,2);
|
||||
|
||||
connection master;
|
||||
--replace_result $SLAVE_MYPORT SLAVE_PORT
|
||||
eval
|
||||
CREATE TABLE federated.t10
|
||||
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
|
||||
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t10';
|
||||
|
||||
--replace_result $SLAVE_MYPORT SLAVE_PORT
|
||||
eval
|
||||
CREATE TABLE federated.t11
|
||||
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
|
||||
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t11';
|
||||
|
||||
use federated;
|
||||
SELECT * FROM t10 LEFT JOIN
|
||||
(t11, (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=2) dt2) dt
|
||||
) ON t10.a=t11.a;
|
||||
|
||||
set global federated_pushdown=0;
|
||||
|
||||
source include/federated_cleanup.inc;
|
||||
|
|
|
@ -33,11 +33,19 @@ b bigint unsigned NOT NULL,
|
|||
d1 date NOT NULL,
|
||||
PRIMARY KEY (b,d1)
|
||||
) ENGINE=InnoDB;
|
||||
DROP TABLE b;
|
||||
set foreign_key_checks = 1;
|
||||
CREATE TABLE b (
|
||||
b bigint unsigned NOT NULL,
|
||||
d1 date NOT NULL,
|
||||
PRIMARY KEY (b,d1)
|
||||
) ENGINE=InnoDB;
|
||||
ERROR HY000: Can't create table `bug_fk`.`b` (errno: 150 "Foreign key constraint is incorrectly formed")
|
||||
show warnings;
|
||||
Level Code Message
|
||||
Error 1005 Can't create table `bug_fk`.`b` (errno: 150 "Foreign key constraint is incorrectly formed")
|
||||
Warning 1215 Cannot add foreign key constraint for `b`
|
||||
set foreign_key_checks = 0;
|
||||
DROP TABLE IF EXISTS d;
|
||||
Warnings:
|
||||
Note 1051 Unknown table 'bug_fk.d'
|
||||
|
|
|
@ -2531,9 +2531,19 @@ disconnect b;
|
|||
set foreign_key_checks=0;
|
||||
create table t2 (a int primary key, b int, foreign key (b) references t1(a)) engine = innodb;
|
||||
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
|
||||
ERROR HY000: Can't create table `test`.`t1` (errno: 150 "Foreign key constraint is incorrectly formed")
|
||||
set foreign_key_checks=1;
|
||||
insert into t2 values (1,1);
|
||||
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`b`) REFERENCES `t1` (`a`))
|
||||
set foreign_key_checks=0;
|
||||
drop table t1;
|
||||
set foreign_key_checks=1;
|
||||
insert into t2 values (1,1);
|
||||
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`b`) REFERENCES `t1` (`a`))
|
||||
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
|
||||
ERROR HY000: Can't create table `test`.`t1` (errno: 150 "Foreign key constraint is incorrectly formed")
|
||||
drop table t2;
|
||||
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
|
||||
drop table t1;
|
||||
set foreign_key_checks=0;
|
||||
create table t1(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=latin1;
|
||||
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=utf8;
|
||||
|
|
|
@ -193,6 +193,7 @@ SELECT n_rows FROM mysql.innodb_table_stats WHERE TABLE_NAME="t1";
|
|||
n_rows
|
||||
4096
|
||||
DROP TABLE t1;
|
||||
# End of 10.6 tests
|
||||
#
|
||||
# MDEV-26947 UNIQUE column checks fail in InnoDB resulting
|
||||
# in table corruption
|
||||
|
@ -367,3 +368,4 @@ Table Op Msg_type Msg_text
|
|||
test.t1 check status OK
|
||||
DROP TABLE t1;
|
||||
SET GLOBAL INNODB_DEFAULT_ROW_FORMAT= @format;
|
||||
# End of 10.7 tests
|
||||
|
|
|
@ -34,13 +34,15 @@ ROLLBACK;
|
|||
InnoDB 0 transactions not purged
|
||||
INSERT INTO t2 VALUES
|
||||
(16,1551,'Omnium enim rerum'),(128,1571,' principia parva sunt');
|
||||
BEGIN;
|
||||
UPDATE t1 SET c2=c2+1;
|
||||
connect ddl, localhost, root;
|
||||
SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
|
||||
ALTER TABLE t2 DROP COLUMN c3, ADD COLUMN c5 TEXT DEFAULT 'naturam abhorrere';
|
||||
connection default;
|
||||
SET DEBUG_SYNC='now WAIT_FOR ddl';
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
UPDATE t1 SET c2=c2+1;
|
||||
COMMIT;
|
||||
# Kill the server
|
||||
disconnect ddl;
|
||||
# restart
|
||||
|
@ -61,6 +63,8 @@ DELETE FROM t2;
|
|||
ROLLBACK;
|
||||
InnoDB 0 transactions not purged
|
||||
INSERT INTO t2 VALUES (64,42,'De finibus bonorum'), (347,33101,' et malorum');
|
||||
BEGIN;
|
||||
DELETE FROM t1;
|
||||
connect ddl, localhost, root;
|
||||
ALTER TABLE t2 DROP COLUMN c3;
|
||||
SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
|
||||
|
@ -68,7 +72,7 @@ ALTER TABLE t2 ADD COLUMN (c4 TEXT NOT NULL DEFAULT ' et malorum');
|
|||
connection default;
|
||||
SET DEBUG_SYNC='now WAIT_FOR ddl';
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
DELETE FROM t1;
|
||||
COMMIT;
|
||||
# Kill the server
|
||||
disconnect ddl;
|
||||
# restart
|
||||
|
@ -138,6 +142,8 @@ InnoDB 0 transactions not purged
|
|||
#
|
||||
# MDEV-24323 Crash on recovery after kill during instant ADD COLUMN
|
||||
#
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES(0,0);
|
||||
connect ddl, localhost, root;
|
||||
CREATE TABLE t3(id INT PRIMARY KEY, c2 INT, v2 INT AS(c2) VIRTUAL, UNIQUE(v2))
|
||||
ENGINE=InnoDB;
|
||||
|
@ -147,7 +153,7 @@ ALTER TABLE t3 ADD COLUMN c3 TEXT NOT NULL DEFAULT 'sic transit gloria mundi';
|
|||
connection default;
|
||||
SET DEBUG_SYNC='now WAIT_FOR ddl';
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
INSERT INTO t1 VALUES(0,0);
|
||||
COMMIT;
|
||||
# Kill the server
|
||||
disconnect ddl;
|
||||
# restart
|
||||
|
@ -183,13 +189,15 @@ DROP TABLE t2,t3;
|
|||
#
|
||||
CREATE TABLE t2(a INT UNSIGNED PRIMARY KEY) ENGINE=InnoDB;
|
||||
INSERT INTO t2 VALUES (1),(2),(3),(4),(5),(6);
|
||||
BEGIN;
|
||||
DELETE FROM t1;
|
||||
connect ddl, localhost, root;
|
||||
SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
|
||||
ALTER TABLE t2 ADD COLUMN b TINYINT UNSIGNED NOT NULL DEFAULT 42 FIRST;
|
||||
connection default;
|
||||
SET DEBUG_SYNC='now WAIT_FOR ddl';
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
DELETE FROM t1;
|
||||
COMMIT;
|
||||
# Kill the server
|
||||
disconnect ddl;
|
||||
# restart
|
||||
|
|
|
@ -80,9 +80,19 @@ SET FOREIGN_KEY_CHECKS=0;
|
|||
ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (a), ALGORITHM=COPY;
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
LOCK TABLES t1 WRITE;
|
||||
SET FOREIGN_KEY_CHECKS=1;
|
||||
TRUNCATE t1;
|
||||
ERROR HY000: Cannot add foreign key constraint for `t1`
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
ERROR HY000: Table 't1' was not locked with LOCK TABLES
|
||||
SELECT * FROM t1;
|
||||
pk a
|
||||
1 1
|
||||
UNLOCK TABLES;
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
|
||||
SET FOREIGN_KEY_CHECKS=0;
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
SELECT * FROM t1;
|
||||
pk a
|
||||
1 1
|
||||
|
|
|
@ -46,7 +46,15 @@ show create table c;
|
|||
#
|
||||
# Note that column b has different type in parent table
|
||||
#
|
||||
--error 1005
|
||||
CREATE TABLE b (
|
||||
b bigint unsigned NOT NULL,
|
||||
d1 date NOT NULL,
|
||||
PRIMARY KEY (b,d1)
|
||||
) ENGINE=InnoDB;
|
||||
DROP TABLE b;
|
||||
|
||||
set foreign_key_checks = 1;
|
||||
--error ER_CANT_CREATE_TABLE
|
||||
CREATE TABLE b (
|
||||
b bigint unsigned NOT NULL,
|
||||
d1 date NOT NULL,
|
||||
|
@ -54,6 +62,7 @@ CREATE TABLE b (
|
|||
) ENGINE=InnoDB;
|
||||
|
||||
show warnings;
|
||||
set foreign_key_checks = 0;
|
||||
|
||||
DROP TABLE IF EXISTS d;
|
||||
|
||||
|
@ -64,7 +73,7 @@ CREATE TABLE d (
|
|||
CONSTRAINT bd_fk FOREIGN KEY (b) REFERENCES b (b)
|
||||
) ENGINE=InnoDB;
|
||||
|
||||
show warnings;
|
||||
show warnings;
|
||||
|
||||
set foreign_key_checks = 1;
|
||||
|
||||
|
|
|
@ -1598,12 +1598,22 @@ disconnect b;
|
|||
|
||||
set foreign_key_checks=0;
|
||||
create table t2 (a int primary key, b int, foreign key (b) references t1(a)) engine = innodb;
|
||||
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
|
||||
set foreign_key_checks=1;
|
||||
--error ER_NO_REFERENCED_ROW_2
|
||||
insert into t2 values (1,1);
|
||||
set foreign_key_checks=0;
|
||||
drop table t1;
|
||||
set foreign_key_checks=1;
|
||||
--error ER_NO_REFERENCED_ROW_2
|
||||
insert into t2 values (1,1);
|
||||
# Embedded server doesn't chdir to data directory
|
||||
--replace_result $MYSQLTEST_VARDIR . master-data/ ''
|
||||
--error ER_CANT_CREATE_TABLE
|
||||
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
|
||||
set foreign_key_checks=1;
|
||||
drop table t2;
|
||||
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
|
||||
drop table t1;
|
||||
|
||||
# test that FKs between different charsets are not accepted in CREATE even
|
||||
# when f_k_c is 0
|
||||
|
|
|
@ -208,6 +208,8 @@ source include/wait_condition.inc;
|
|||
SELECT n_rows FROM mysql.innodb_table_stats WHERE TABLE_NAME="t1";
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo # End of 10.6 tests
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-26947 UNIQUE column checks fail in InnoDB resulting
|
||||
--echo # in table corruption
|
||||
|
@ -375,3 +377,5 @@ INSERT IGNORE INTO t1 VALUES
|
|||
CHECK TABLE t1;
|
||||
DROP TABLE t1;
|
||||
SET GLOBAL INNODB_DEFAULT_ROW_FORMAT= @format;
|
||||
|
||||
--echo # End of 10.7 tests
|
||||
|
|
|
@ -47,6 +47,9 @@ ROLLBACK;
|
|||
INSERT INTO t2 VALUES
|
||||
(16,1551,'Omnium enim rerum'),(128,1571,' principia parva sunt');
|
||||
|
||||
BEGIN;
|
||||
UPDATE t1 SET c2=c2+1;
|
||||
|
||||
connect ddl, localhost, root;
|
||||
SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
|
||||
--send
|
||||
|
@ -55,7 +58,7 @@ ALTER TABLE t2 DROP COLUMN c3, ADD COLUMN c5 TEXT DEFAULT 'naturam abhorrere';
|
|||
connection default;
|
||||
SET DEBUG_SYNC='now WAIT_FOR ddl';
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
UPDATE t1 SET c2=c2+1;
|
||||
COMMIT;
|
||||
|
||||
--source include/kill_mysqld.inc
|
||||
disconnect ddl;
|
||||
|
@ -73,6 +76,8 @@ ROLLBACK;
|
|||
--source include/wait_all_purged.inc
|
||||
|
||||
INSERT INTO t2 VALUES (64,42,'De finibus bonorum'), (347,33101,' et malorum');
|
||||
BEGIN;
|
||||
DELETE FROM t1;
|
||||
|
||||
connect ddl, localhost, root;
|
||||
ALTER TABLE t2 DROP COLUMN c3;
|
||||
|
@ -83,7 +88,7 @@ ALTER TABLE t2 ADD COLUMN (c4 TEXT NOT NULL DEFAULT ' et malorum');
|
|||
connection default;
|
||||
SET DEBUG_SYNC='now WAIT_FOR ddl';
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
DELETE FROM t1;
|
||||
COMMIT;
|
||||
|
||||
--source include/kill_mysqld.inc
|
||||
disconnect ddl;
|
||||
|
@ -177,6 +182,9 @@ DELETE FROM t2;
|
|||
--echo #
|
||||
--echo # MDEV-24323 Crash on recovery after kill during instant ADD COLUMN
|
||||
--echo #
|
||||
BEGIN;
|
||||
INSERT INTO t1 VALUES(0,0);
|
||||
|
||||
connect ddl, localhost, root;
|
||||
CREATE TABLE t3(id INT PRIMARY KEY, c2 INT, v2 INT AS(c2) VIRTUAL, UNIQUE(v2))
|
||||
ENGINE=InnoDB;
|
||||
|
@ -189,7 +197,7 @@ ALTER TABLE t3 ADD COLUMN c3 TEXT NOT NULL DEFAULT 'sic transit gloria mundi';
|
|||
connection default;
|
||||
SET DEBUG_SYNC='now WAIT_FOR ddl';
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
INSERT INTO t1 VALUES(0,0);
|
||||
COMMIT;
|
||||
|
||||
--source include/kill_mysqld.inc
|
||||
disconnect ddl;
|
||||
|
@ -207,6 +215,9 @@ DROP TABLE t2,t3;
|
|||
CREATE TABLE t2(a INT UNSIGNED PRIMARY KEY) ENGINE=InnoDB;
|
||||
INSERT INTO t2 VALUES (1),(2),(3),(4),(5),(6);
|
||||
|
||||
BEGIN;
|
||||
DELETE FROM t1;
|
||||
|
||||
connect ddl, localhost, root;
|
||||
SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
|
||||
--send
|
||||
|
@ -215,7 +226,7 @@ ALTER TABLE t2 ADD COLUMN b TINYINT UNSIGNED NOT NULL DEFAULT 42 FIRST;
|
|||
connection default;
|
||||
SET DEBUG_SYNC='now WAIT_FOR ddl';
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
DELETE FROM t1;
|
||||
COMMIT;
|
||||
|
||||
--source include/kill_mysqld.inc
|
||||
disconnect ddl;
|
||||
|
|
|
@ -92,8 +92,19 @@ SET FOREIGN_KEY_CHECKS=0;
|
|||
ALTER TABLE t1 ADD FOREIGN KEY (a) REFERENCES t1 (a), ALGORITHM=COPY;
|
||||
INSERT INTO t1 VALUES (1,1);
|
||||
LOCK TABLES t1 WRITE;
|
||||
SET FOREIGN_KEY_CHECKS=1;
|
||||
--error ER_CANNOT_ADD_FOREIGN
|
||||
TRUNCATE t1;
|
||||
# Whether TRUNCATE succeeds or fails, it will reload FOREIGN KEY constraints.
|
||||
# As a result, ha_innobase::referenced_by_foreign_key() will retun TRUE
|
||||
# (for the self-referential key), and the statement will fail.
|
||||
--error ER_TABLE_NOT_LOCKED
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
SELECT * FROM t1;
|
||||
UNLOCK TABLES;
|
||||
--error ER_NO_REFERENCED_ROW_2
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
SET FOREIGN_KEY_CHECKS=0;
|
||||
INSERT INTO t1 VALUES (2,2);
|
||||
SELECT * FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -62,7 +62,6 @@ SET @saved_debug_dbug= @@debug_dbug;
|
|||
CREATE TABLE t1 (b CHAR(12), FULLTEXT KEY(b)) engine=InnoDB;
|
||||
SET debug_dbug='+d,ib_create_table_fail_too_many_trx';
|
||||
TRUNCATE t1;
|
||||
ERROR HY000: Got error -1 "Internal error < 0 (Not system error)" from storage engine InnoDB
|
||||
SET debug_dbug=@saved_debug_dbug;
|
||||
DROP TABLE t1;
|
||||
# End of 10.3 tests
|
||||
|
|
|
@ -91,7 +91,6 @@ SET @saved_debug_dbug= @@debug_dbug;
|
|||
|
||||
CREATE TABLE t1 (b CHAR(12), FULLTEXT KEY(b)) engine=InnoDB;
|
||||
SET debug_dbug='+d,ib_create_table_fail_too_many_trx';
|
||||
--error ER_GET_ERRNO
|
||||
TRUNCATE t1;
|
||||
SET debug_dbug=@saved_debug_dbug;
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -795,4 +795,14 @@ ENGINE=InnoDB;
|
|||
INSERT INTO t VALUES (REPEAT('MariaDB Corporation Ab ',351),POINT(0,0));
|
||||
ALTER TABLE t FORCE;
|
||||
DROP TABLE t;
|
||||
#
|
||||
# MDEV-29856 heap-use-after-poison in row_merge_spatial_rows()
|
||||
# with PRIMARY KEY on column prefix
|
||||
#
|
||||
CREATE TABLE t (id INT, f TEXT, s POINT NOT NULL,
|
||||
PRIMARY KEY(id,f(1)), SPATIAL(s)) ENGINE=InnoDB;
|
||||
INSERT INTO t VALUES
|
||||
(1,REPEAT('x',8192),@p:=ST_GeomFromText('POINT(0 0)')),(2,'',@p);
|
||||
ALTER TABLE t FORCE;
|
||||
DROP TABLE t;
|
||||
# End of 10.3 tests
|
||||
|
|
|
@ -791,4 +791,16 @@ ALTER TABLE t FORCE;
|
|||
# Cleanup
|
||||
DROP TABLE t;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29856 heap-use-after-poison in row_merge_spatial_rows()
|
||||
--echo # with PRIMARY KEY on column prefix
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t (id INT, f TEXT, s POINT NOT NULL,
|
||||
PRIMARY KEY(id,f(1)), SPATIAL(s)) ENGINE=InnoDB;
|
||||
INSERT INTO t VALUES
|
||||
(1,REPEAT('x',8192),@p:=ST_GeomFromText('POINT(0 0)')),(2,'',@p);
|
||||
ALTER TABLE t FORCE;
|
||||
DROP TABLE t;
|
||||
|
||||
--echo # End of 10.3 tests
|
||||
|
|
|
@@ -1 +0,0 @@

mysql-test/suite/mariabackup/full_backup.opt (new file)
@@ -0,0 +1 @@
+ --innodb_undo_tablespaces=2
@ -12,3 +12,18 @@ SELECT * FROM t;
|
|||
i
|
||||
1
|
||||
DROP TABLE t;
|
||||
#
|
||||
# MDEV-27121 mariabackup incompatible with disabled dedicated
|
||||
# undo log tablespaces
|
||||
#
|
||||
call mtr.add_suppression("InnoDB: innodb_undo_tablespaces=0 disables dedicated undo log tablespaces");
|
||||
# restart: --innodb_undo_tablespaces=0
|
||||
# xtrabackup backup
|
||||
# xtrabackup prepare
|
||||
# shutdown server
|
||||
# remove datadir
|
||||
# xtrabackup move back
|
||||
# restart: --innodb_undo_tablespaces=0
|
||||
# Display undo log files from target directory
|
||||
undo001
|
||||
undo002
|
||||
|
|
|
@ -29,3 +29,27 @@ SELECT * FROM t;
|
|||
DROP TABLE t;
|
||||
rmdir $targetdir;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-27121 mariabackup incompatible with disabled dedicated
|
||||
--echo # undo log tablespaces
|
||||
--echo #
|
||||
call mtr.add_suppression("InnoDB: innodb_undo_tablespaces=0 disables dedicated undo log tablespaces");
|
||||
|
||||
let $restart_parameters=--innodb_undo_tablespaces=0;
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
echo # xtrabackup backup;
|
||||
--disable_result_log
|
||||
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
|
||||
--enable_result_log
|
||||
|
||||
echo # xtrabackup prepare;
|
||||
--disable_result_log
|
||||
exec $XTRABACKUP --prepare --target-dir=$targetdir;
|
||||
-- source include/restart_and_restore.inc
|
||||
--enable_result_log
|
||||
|
||||
--echo # Display undo log files from target directory
|
||||
list_files $targetdir undo*;
|
||||
|
||||
rmdir $targetdir;
|
||||
|
|
mysql-test/suite/mariabackup/mdev-14447.combinations (new file)
@@ -0,0 +1,5 @@
+ [crc32]
+ --innodb-checksum-algorithm=crc32
+
+ [full_crc32]
+ --innodb-checksum-algorithm=full_crc32
@@ -202,7 +202,7 @@ int start_addr2line_fork(const char *binary_path)
  close(out[0]);
  close(out[1]);
  execlp("addr2line", "addr2line", "-C", "-f", "-e", binary_path, NULL);
- exit(1);
+ _exit(1);
  }

  close(in[0]);

@@ -319,12 +319,20 @@ int my_addr_resolve(void *ptr, my_addr_loc *loc)
  /* Save result for future comparisons. */
  strnmov(addr2line_binary, info.dli_fname, sizeof(addr2line_binary));

+ #ifdef _AIX
+ /*
+   info.dli_fbase is a char on AIX and casting it doesn't fool gcc.
+   leave backtracing broken on AIX until a real solution can be found.
+ */
+ addr_offset= NULL;
+ #else
  /*
    Check if we should use info.dli_fbase as an offset or not
    for the base program. This is depending on if the compilation is
    done with PIE or not.
  */
- addr_offset= (void*) info.dli_fbase;
+ addr_offset= info.dli_fbase;
+ #endif
  #ifndef __PIE__
  if (strcmp(info.dli_fname, my_progname) == 0 &&
  addr_resolve((void*) my_addr_resolve, loc) == 0 &&
@@ -23,7 +23,7 @@

  #ifndef MAIN

- #if defined(_AIX) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__linux__) || defined(__sun) || defined(_WIN32)
+ #if defined(_AIX) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__linux__) || defined(__sun) || defined(_WIN32)
  static my_bool memcpy_and_test(uchar *to, uchar *from, uint len)
  {
  uint i, res= 1;

@@ -35,8 +35,14 @@ static my_bool memcpy_and_test(uchar *to, uchar *from, uint len)
  }
  #endif

- #if defined(__APPLE__) || defined(__FreeBSD__)
+ #if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+ #ifdef __OpenBSD__
+ #include <netinet/in.h>
+ #include <net/if_arp.h>
+ #include <netinet/if_ether.h>
+ #else
  #include <net/ethernet.h>
+ #endif
  #include <sys/sysctl.h>
  #include <net/route.h>
  #include <net/if.h>
@@ -44,11 +44,6 @@ Pushdown_derived::Pushdown_derived(TABLE_LIST *tbl, derived_handler *h)
  }


- Pushdown_derived::~Pushdown_derived()
- {
- delete handler;
- }
-

  int Pushdown_derived::execute()
  {
sql/field.h

@@ -1766,12 +1766,6 @@ public:
  Used by the ALTER TABLE
  */
  virtual bool is_equal(const Column_definition &new_field) const= 0;
- // Used as double dispatch pattern: calls virtual method of handler
- virtual bool
- can_be_converted_by_engine(const Column_definition &new_type) const
- {
- return false;
- }
  /* convert decimal to longlong with overflow check */
  longlong convert_decimal2longlong(const my_decimal *val, bool unsigned_flag,
  int *err);

@@ -4058,11 +4052,6 @@ public:
  void sql_type(String &str) const override;
  void sql_rpl_type(String*) const override;
  bool is_equal(const Column_definition &new_field) const override;
- bool can_be_converted_by_engine(const Column_definition &new_type) const
- override
- {
- return table->file->can_convert_string(this, new_type);
- }
  uchar *pack(uchar *to, const uchar *from, uint max_length) override;
  const uchar *unpack(uchar* to, const uchar *from, const uchar *from_end,
  uint param_data) override;

@@ -4217,11 +4206,6 @@ public:
  uchar *new_ptr, uint32 length,
  uchar *new_null_ptr, uint new_null_bit) override;
  bool is_equal(const Column_definition &new_field) const override;
- bool can_be_converted_by_engine(const Column_definition &new_type) const
- override
- {
- return table->file->can_convert_varstring(this, new_type);
- }
  void hash(ulong *nr, ulong *nr2) override;
  uint length_size() const override { return length_bytes; }
  void print_key_value(String *out, uint32 length) override;

@@ -4660,11 +4644,6 @@ public:
  uint32 char_length() const override;
  uint32 character_octet_length() const override;
  bool is_equal(const Column_definition &new_field) const override;
- bool can_be_converted_by_engine(const Column_definition &new_type) const
- override
- {
- return table->file->can_convert_blob(this, new_type);
- }
  void print_key_value(String *out, uint32 length) override;
  Binlog_type_info binlog_type_info() const override;
@@ -12125,35 +12125,12 @@ int ha_partition::info_push(uint info_type, void *info)


  bool
- ha_partition::can_convert_string(const Field_string* field,
- const Column_definition& new_type) const
+ ha_partition::can_convert_nocopy(const Field &field,
+ const Column_definition &new_type) const
  {
  for (uint index= 0; index < m_tot_parts; index++)
  {
- if (!m_file[index]->can_convert_string(field, new_type))
- return false;
- }
- return true;
- }
-
- bool
- ha_partition::can_convert_varstring(const Field_varstring* field,
- const Column_definition& new_type) const{
- for (uint index= 0; index < m_tot_parts; index++)
- {
- if (!m_file[index]->can_convert_varstring(field, new_type))
- return false;
- }
- return true;
- }
-
- bool
- ha_partition::can_convert_blob(const Field_blob* field,
- const Column_definition& new_type) const
- {
- for (uint index= 0; index < m_tot_parts; index++)
- {
- if (!m_file[index]->can_convert_blob(field, new_type))
+ if (!m_file[index]->can_convert_nocopy(field, new_type))
  return false;
  }
  return true;
@@ -1629,16 +1629,8 @@ public:

  friend int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2);
  friend int cmp_key_part_id(void *key_p, uchar *ref1, uchar *ref2);
- bool can_convert_string(
- const Field_string* field,
- const Column_definition& new_field) const override;
-
- bool can_convert_varstring(
- const Field_varstring* field,
- const Column_definition& new_field) const override;

- bool can_convert_blob(
- const Field_blob* field,
- const Column_definition& new_field) const override;
+ bool can_convert_nocopy(const Field &field,
+ const Column_definition &new_field) const override;
  };
  #endif /* HA_PARTITION_INCLUDED */
@@ -7317,8 +7317,13 @@ int handler::check_duplicate_long_entries_update(const uchar *new_rec)
  {
    int error;
    field= keypart->field;
    /* Compare fields if they are different then check for duplicates */
    if (field->cmp_binary_offset(reclength))
    /*
      Compare the fields: if they differ, check for duplicates.
      cmp_binary_offset() cannot distinguish NULL from an empty string,
      so compare the NULL indicators as well.
    */
    if((field->is_null(0) != field->is_null(reclength)) ||
        field->cmp_binary_offset(reclength))
    {
      if((error= check_duplicate_long_entry_key(new_rec, i)))
        return error;
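The added NULL check above exists because a byte-wise comparison of the value area alone cannot tell a NULL field from an empty string; the NULL indicator has to be compared separately. A small illustration of the idea in plain C++, with a hypothetical record layout:

#include <cassert>
#include <cstddef>
#include <cstring>

struct Value {
  bool is_null;       // NULL indicator, stored outside the value bytes
  char bytes[8];      // value area; unused when NULL
  size_t length;
};

// Comparing only the value bytes treats NULL and '' as equal...
bool bytes_equal(const Value &a, const Value &b) {
  return a.length == b.length && memcmp(a.bytes, b.bytes, a.length) == 0;
}

// ...so a correct "changed?" test also compares the NULL indicators.
bool value_changed(const Value &a, const Value &b) {
  return a.is_null != b.is_null || !bytes_equal(a, b);
}

int main() {
  Value null_v{true, {}, 0}, empty_v{false, {}, 0};
  assert(bytes_equal(null_v, empty_v));   // indistinguishable by bytes alone
  assert(value_changed(null_v, empty_v)); // but they are different values
}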
@@ -5097,18 +5097,8 @@ public:
    These functions check for such possibility.
    Implementation could be based on Field_xxx::is_equal()
  */
  virtual bool can_convert_string(const Field_string *field,
                                  const Column_definition &new_type) const
  {
    return false;
  }
  virtual bool can_convert_varstring(const Field_varstring *field,
                                     const Column_definition &new_type) const
  {
    return false;
  }
  virtual bool can_convert_blob(const Field_blob *field,
                                const Column_definition &new_type) const
  virtual bool can_convert_nocopy(const Field &,
                                  const Column_definition &) const
  {
    return false;
  }
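With the three per-type hooks collapsed into the single `can_convert_nocopy()` virtual shown above, a storage engine opts in by overriding one method. The following is only a hedged sketch of what such an override could look like: `ha_example` is a hypothetical engine and the `Field`/`Column_definition` structs are simplified stand-ins, not the server's actual declarations.

// Simplified stand-ins for handler / Field / Column_definition.
struct Field { unsigned length; unsigned charset_id; };
struct Column_definition { unsigned length; unsigned charset_id; };

struct handler {
  virtual bool can_convert_nocopy(const Field &, const Column_definition &) const
  { return false; }                       // default: force a table rebuild
  virtual ~handler() = default;
};

// A hypothetical engine that allows in-place conversion only when the
// character set is unchanged and the column does not shrink.
struct ha_example : handler {
  bool can_convert_nocopy(const Field &old_f,
                          const Column_definition &new_f) const override {
    return old_f.charset_id == new_f.charset_id && new_f.length >= old_f.length;
  }
};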
@@ -4120,8 +4120,8 @@ static int init_common_variables()
  files= my_set_max_open_files(max_open_files);
  SYSVAR_AUTOSIZE_IF_CHANGED(open_files_limit, files, ulong);

  if (files < wanted_files && global_system_variables.log_warnings)
    sql_print_warning("Could not increase number of max_open_files to more than %u (request: %u)", files, wanted_files);
  if (files < max_open_files && global_system_variables.log_warnings)
    sql_print_warning("Could not increase number of max_open_files to more than %u (request: %u)", files, max_open_files);

  /* If we requested too many tc_instances, reduce the number */
  SYSVAR_AUTOSIZE_IF_CHANGED(tc_instances,
@ -1016,11 +1016,7 @@ bool mysql_derived_optimize(THD *thd, LEX *lex, TABLE_LIST *derived)
|
|||
/* Create an object for execution of the query specifying the table */
|
||||
if (!(derived->pushdown_derived=
|
||||
new (thd->mem_root) Pushdown_derived(derived, derived->dt_handler)))
|
||||
{
|
||||
delete derived->dt_handler;
|
||||
derived->dt_handler= NULL;
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
lex->current_select= first_select;
|
||||
|
@ -1245,7 +1241,6 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
|
|||
/* Execute the query that specifies the derived table by a foreign engine */
|
||||
res= derived->pushdown_derived->execute();
|
||||
unit->executed= true;
|
||||
delete derived->pushdown_derived;
|
||||
DBUG_RETURN(res);
|
||||
}
|
||||
|
||||
|
|
|
@ -67,6 +67,7 @@
|
|||
#include "select_handler.h"
|
||||
#include "my_json_writer.h"
|
||||
#include "opt_trace.h"
|
||||
#include "derived_handler.h"
|
||||
#include "create_tmp_table.h"
|
||||
|
||||
/*
|
||||
|
@ -14578,6 +14579,7 @@ void JOIN::cleanup(bool full)
|
|||
}
|
||||
}
|
||||
}
|
||||
free_pushdown_handlers(*join_list);
|
||||
}
|
||||
/* Restore ref array to original state */
|
||||
if (current_ref_ptrs != items0)
|
||||
|
@@ -14588,6 +14590,32 @@ void JOIN::cleanup(bool full)
  DBUG_VOID_RETURN;
}

/**
  Clean up all derived pushdown handlers in this join.

  @detail
    Note that dt_handler is picked at the prepare stage (as opposed
    to the optimization stage, where one would expect it).
    Because of that, the cleanup has to be done from this function,
    which is called from JOIN::cleanup(), and not in JOIN_TAB::cleanup().
*/
void JOIN::free_pushdown_handlers(List<TABLE_LIST>& join_list)
{
  List_iterator<TABLE_LIST> li(join_list);
  TABLE_LIST *table_ref;
  while ((table_ref= li++))
  {
    if (table_ref->nested_join)
      free_pushdown_handlers(table_ref->nested_join->join_list);
    if (table_ref->pushdown_derived)
    {
      delete table_ref->pushdown_derived;
      table_ref->pushdown_derived= NULL;
    }
    delete table_ref->dt_handler;
    table_ref->dt_handler= NULL;
  }
}

/**
  Remove the following expressions from ORDER BY and GROUP BY:
@ -28069,12 +28097,6 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
|
|||
result, unit, first);
|
||||
}
|
||||
|
||||
if (unit->derived && unit->derived->pushdown_derived)
|
||||
{
|
||||
delete unit->derived->pushdown_derived;
|
||||
unit->derived->pushdown_derived= NULL;
|
||||
}
|
||||
|
||||
DBUG_RETURN(res || thd->is_error());
|
||||
}
|
||||
|
||||
|
|
|
@ -1791,6 +1791,7 @@ private:
|
|||
bool add_having_as_table_cond(JOIN_TAB *tab);
|
||||
bool make_aggr_tables_info();
|
||||
bool add_fields_for_current_rowid(JOIN_TAB *cur, List<Item> *fields);
|
||||
void free_pushdown_handlers(List<TABLE_LIST>& join_list);
|
||||
void init_join_cache_and_keyread();
|
||||
bool transform_in_predicates_into_equalities(THD *thd);
|
||||
bool transform_all_conds_and_on_exprs(THD *thd,
|
||||
|
@ -2471,8 +2472,6 @@ public:
|
|||
|
||||
Pushdown_derived(TABLE_LIST *tbl, derived_handler *h);
|
||||
|
||||
~Pushdown_derived();
|
||||
|
||||
int execute();
|
||||
};
|
||||
|
||||
|
|
|
@@ -6535,7 +6535,7 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
      bool is_equal= field->is_equal(*new_field);
      if (!is_equal)
      {
        if (field->can_be_converted_by_engine(*new_field))
        if (field->table->file->can_convert_nocopy(*field, *new_field))
        {
          /*
            New column type differs from the old one, but storage engine can
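The hunk above routes the "can this ALTER avoid a copy?" question straight to the table's handler. A compact sketch of the resulting decision flow; `classify_column_change` and the simplified types are illustrative only, not server code.

enum class AlterPath { NO_CHANGE, METADATA_ONLY, COPY };

// Simplified stand-ins for the objects used in fill_alter_inplace_info().
struct Field;
struct Column_definition {};

struct handler {
  virtual bool can_convert_nocopy(const Field &, const Column_definition &) const
  { return false; }
  virtual ~handler() = default;
};

struct Field {
  handler *file;                                  // the table's storage engine
  // placeholder for the real Field::is_equal() check
  bool is_equal(const Column_definition &) const { return false; }
};

AlterPath classify_column_change(const Field &f, const Column_definition &new_def)
{
  if (f.is_equal(new_def))
    return AlterPath::NO_CHANGE;                  // definitions already match
  if (f.file->can_convert_nocopy(f, new_def))
    return AlterPath::METADATA_ONLY;              // engine converts in place
  return AlterPath::COPY;                         // otherwise rebuild by copying rows
}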
@ -2,7 +2,7 @@
|
|||
#define SQL_TYPE_GEOM_H_INCLUDED
|
||||
/*
|
||||
Copyright (c) 2015 MariaDB Foundation
|
||||
Copyright (c) 2019 MariaDB
|
||||
Copyright (c) 2019, 2022, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
|
@ -397,12 +397,6 @@ public:
|
|||
!table->copy_blobs;
|
||||
}
|
||||
bool is_equal(const Column_definition &new_field) const override;
|
||||
bool can_be_converted_by_engine(const Column_definition &new_type)
|
||||
const override
|
||||
{
|
||||
return false; // Override the Field_blob behavior
|
||||
}
|
||||
|
||||
int store(const char *to, size_t length, CHARSET_INFO *charset) override;
|
||||
int store(double nr) override;
|
||||
int store(longlong nr, bool unsigned_val) override;
|
||||
|
|
|
@ -319,3 +319,12 @@ SELECT * from pg_in_maria;
|
|||
my space column
|
||||
My value
|
||||
DROP TABLE pg_in_maria;
|
||||
#
|
||||
# MDEV-29397 UPDATE with WHERE on part of two-part primary key causes
|
||||
# info to turn into error.
|
||||
#
|
||||
CREATE TABLE t1 (a VARCHAR(6), b VARCHAR(6), PRIMARY KEY(a, b)) ENGINE=CONNECT TABNAME='schema1.t3' CHARSET=utf8 DATA_CHARSET=utf8 TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEnginePostgresql;UID=mtr;PWD=mtr';
|
||||
UPDATE t1 SET a='10' WHERE a='20';
|
||||
Warnings:
|
||||
Note 1105 schema1.t3: 0 affected rows
|
||||
DROP TABLE t1;
|
||||
|
|
|
@ -223,3 +223,12 @@ DROP TABLE t1;
|
|||
CREATE TABLE pg_in_maria ENGINE=CONNECT TABNAME='schema1.space_in_column_name' CHARSET=utf8 DATA_CHARSET=utf8 TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEnginePostgresql;UID=mtr;PWD=mtr' quoted=1;
|
||||
SELECT * from pg_in_maria;
|
||||
DROP TABLE pg_in_maria;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29397 UPDATE with WHERE on part of two-part primary key causes
|
||||
--echo # info to turn into error.
|
||||
--echo #
|
||||
CREATE TABLE t1 (a VARCHAR(6), b VARCHAR(6), PRIMARY KEY(a, b)) ENGINE=CONNECT TABNAME='schema1.t3' CHARSET=utf8 DATA_CHARSET=utf8 TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEnginePostgresql;UID=mtr;PWD=mtr';
|
||||
UPDATE t1 SET a='10' WHERE a='20';
|
||||
DROP TABLE t1;
|
||||
|
||||
|
|
|
@@ -2583,7 +2583,7 @@ int ODBConn::Rewind(char *sql, ODBCCOL *tocols)
  int rc, rbuf = -1;

  if (!m_hstmt)
    rbuf = -1;
    rbuf = 0;
  else if (m_Full)
    rbuf = m_Rows;           // No need to "rewind"
  else if (m_Scrollable) {
@@ -472,7 +472,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
      my_len= res - buf + 1;
      my_schema_table= (char *) malloc(my_len);
      memcpy(my_schema_table, buf, my_len - 1);
      my_schema_table[my_len] = 0;
      my_schema_table[my_len - 1] = 0;
      Query->Append(Quote);
      Query->Append(my_schema_table);
      Query->Append(Quote);
@@ -480,7 +480,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
      Query->Append(".");
      // Parse table
      my_len= strlen(buf) - my_len + 1;
      my_schema_table= (char *) malloc(my_len);
      my_schema_table= (char *) malloc(my_len + 1);
      memcpy(my_schema_table, ++res, my_len);
      my_schema_table[my_len] = 0;
      Query->Append(Quote);
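The two CONNECT fixes above are classic off-by-one corrections: the buffer must be allocated with room for the terminator, and the terminator must be written inside the allocation. A minimal sketch of the safe pattern in plain C++, with a hypothetical helper name:

#include <cstddef>
#include <cstdlib>
#include <cstring>

// Copy `len` bytes of `src` into a freshly allocated, NUL-terminated buffer.
// Allocating len + 1 and writing the terminator at index len keeps every
// write inside the allocation (the bug fixed above wrote at index len of a
// len-byte buffer).
char *copy_terminated(const char *src, size_t len) {
  char *dst = static_cast<char *>(malloc(len + 1));
  if (!dst)
    return nullptr;
  memcpy(dst, src, len);
  dst[len] = '\0';
  return dst;
}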
@ -563,8 +563,9 @@ inline void PageBulk::finish()
|
|||
void PageBulk::commit(bool success)
|
||||
{
|
||||
finish();
|
||||
if (success && !dict_index_is_clust(m_index) && page_is_leaf(m_page))
|
||||
ibuf_set_bitmap_for_bulk_load(m_block, innobase_fill_factor == 100);
|
||||
if (success && !m_index->is_clust() && page_is_leaf(m_page))
|
||||
ibuf_set_bitmap_for_bulk_load(m_block, &m_mtr,
|
||||
innobase_fill_factor == 100);
|
||||
m_mtr.commit();
|
||||
}
|
||||
|
||||
|
@ -634,7 +635,7 @@ PageBulk::getSplitRec()
|
|||
< total_used_size / 2);
|
||||
|
||||
/* Keep at least one record on left page */
|
||||
if (page_rec_is_second(rec, m_page)) {
|
||||
if (page_rec_is_first(rec, m_page)) {
|
||||
rec = page_rec_get_next(rec);
|
||||
ut_ad(page_rec_is_user_rec(rec));
|
||||
}
|
||||
|
|
|
@ -588,7 +588,7 @@ bool buf_page_is_corrupted(bool check_lsn, const byte *read_buf,
|
|||
DBUG_EXECUTE_IF(
|
||||
"page_intermittent_checksum_mismatch", {
|
||||
static int page_counter;
|
||||
if (page_counter++ == 2) {
|
||||
if (page_counter++ == 3) {
|
||||
crc32++;
|
||||
}
|
||||
});
|
||||
|
@ -723,7 +723,7 @@ bool buf_page_is_corrupted(bool check_lsn, const byte *read_buf,
|
|||
DBUG_EXECUTE_IF(
|
||||
"page_intermittent_checksum_mismatch", {
|
||||
static int page_counter;
|
||||
if (page_counter++ == 2) return true;
|
||||
if (page_counter++ == 3) return true;
|
||||
});
|
||||
|
||||
if ((checksum_field1 != crc32
|
||||
|
|
|
@ -685,8 +685,7 @@ dict_acquire_mdl_shared(dict_table_t *table,
|
|||
}
|
||||
else
|
||||
{
|
||||
ut_ad(dict_sys.frozen());
|
||||
ut_ad(!dict_sys.locked());
|
||||
ut_ad(dict_sys.frozen_not_locked());
|
||||
db_len= dict_get_db_name_len(table->name.m_name);
|
||||
}
|
||||
|
||||
|
@ -1003,7 +1002,7 @@ void dict_sys_t::lock_wait(SRW_LOCK_ARGS(const char *file, unsigned line))
|
|||
latch_ex_wait_start.store(0, std::memory_order_relaxed);
|
||||
ut_ad(!latch_readers);
|
||||
ut_ad(!latch_ex);
|
||||
ut_d(latch_ex= true);
|
||||
ut_d(latch_ex= pthread_self());
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1021,15 +1020,15 @@ void dict_sys_t::lock_wait(SRW_LOCK_ARGS(const char *file, unsigned line))
|
|||
latch.wr_lock(SRW_LOCK_ARGS(file, line));
|
||||
ut_ad(!latch_readers);
|
||||
ut_ad(!latch_ex);
|
||||
ut_d(latch_ex= true);
|
||||
ut_d(latch_ex= pthread_self());
|
||||
}
|
||||
|
||||
#ifdef UNIV_PFS_RWLOCK
|
||||
ATTRIBUTE_NOINLINE void dict_sys_t::unlock()
|
||||
{
|
||||
ut_ad(latch_ex);
|
||||
ut_ad(latch_ex == pthread_self());
|
||||
ut_ad(!latch_readers);
|
||||
ut_d(latch_ex= false);
|
||||
ut_d(latch_ex= 0);
|
||||
latch.wr_unlock();
|
||||
}
|
||||
|
||||
|
@ -1498,6 +1497,7 @@ dict_table_t::rename_tablespace(span<const char> new_name, bool replace) const
|
|||
err= DB_TABLESPACE_EXISTS;
|
||||
else
|
||||
{
|
||||
space->x_lock();
|
||||
err= space->rename(path, true, replace);
|
||||
if (data_dir)
|
||||
{
|
||||
|
@ -1505,6 +1505,7 @@ dict_table_t::rename_tablespace(span<const char> new_name, bool replace) const
|
|||
new_name= {name.m_name, strlen(name.m_name)};
|
||||
RemoteDatafile::delete_link_file(new_name);
|
||||
}
|
||||
space->x_unlock();
|
||||
}
|
||||
|
||||
ut_free(path);
|
||||
|
@ -2757,17 +2758,6 @@ dict_index_build_internal_fts(
|
|||
}
|
||||
/*====================== FOREIGN KEY PROCESSING ========================*/
|
||||
|
||||
/*********************************************************************//**
|
||||
Checks if a table is referenced by foreign keys.
|
||||
@return TRUE if table is referenced by a foreign key */
|
||||
ibool
|
||||
dict_table_is_referenced_by_foreign_key(
|
||||
/*====================================*/
|
||||
const dict_table_t* table) /*!< in: InnoDB table */
|
||||
{
|
||||
return(!table->referenced_set.empty());
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
Removes a foreign constraint struct from the dictionary cache. */
|
||||
void
|
||||
|
|
|
@ -267,8 +267,7 @@ void trx_t::commit(std::vector<pfs_os_file_t> &deleted)
|
|||
if (btr_defragment_active)
|
||||
btr_defragment_remove_table(table);
|
||||
const fil_space_t *space= table->space;
|
||||
ut_ad(!strstr(table->name.m_name, "/FTS_") ||
|
||||
purge_sys.must_wait_FTS());
|
||||
ut_ad(!p.second.is_aux_table() || purge_sys.must_wait_FTS());
|
||||
dict_sys.remove(table);
|
||||
if (const auto id= space ? space->id : 0)
|
||||
{
|
||||
|
|
|
@ -1373,6 +1373,13 @@ static dberr_t fts_drop_table(trx_t *trx, const char *table_name, bool rename)
|
|||
return err;
|
||||
}
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
for (auto &p : trx->mod_tables)
|
||||
{
|
||||
if (p.first == table)
|
||||
p.second.set_aux_table();
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
return DB_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
|
|||
#include <my_bitmap.h>
|
||||
#include <mysql/service_thd_alloc.h>
|
||||
#include <mysql/service_thd_wait.h>
|
||||
#include "field.h"
|
||||
#include "sql_type_geom.h"
|
||||
#include "scope.h"
|
||||
#include "srv0srv.h"
|
||||
|
||||
|
@ -11454,6 +11454,8 @@ innobase_fts_load_stopword(
|
|||
trx_t* trx, /*!< in: transaction */
|
||||
THD* thd) /*!< in: current thread */
|
||||
{
|
||||
ut_ad(dict_sys.locked());
|
||||
|
||||
const char *stopword_table= THDVAR(thd, ft_user_stopword_table);
|
||||
if (!stopword_table)
|
||||
{
|
||||
|
@ -11463,9 +11465,11 @@ innobase_fts_load_stopword(
|
|||
mysql_mutex_unlock(&LOCK_global_system_variables);
|
||||
}
|
||||
|
||||
return !high_level_read_only &&
|
||||
fts_load_stopword(table, trx, stopword_table,
|
||||
THDVAR(thd, ft_enable_stopword), false);
|
||||
table->fts->dict_locked= true;
|
||||
bool success= fts_load_stopword(table, trx, stopword_table,
|
||||
THDVAR(thd, ft_enable_stopword), false);
|
||||
table->fts->dict_locked= false;
|
||||
return success;
|
||||
}
|
||||
|
||||
/** Parse the table name into normal name and remote path if needed.
|
||||
|
@ -12801,15 +12805,18 @@ int create_table_info_t::create_table(bool create_fk)
|
|||
dberr_t err = create_fk ? create_foreign_keys() : DB_SUCCESS;
|
||||
|
||||
if (err == DB_SUCCESS) {
|
||||
const dict_err_ignore_t ignore_err = m_trx->check_foreigns
|
||||
? DICT_ERR_IGNORE_NONE : DICT_ERR_IGNORE_FK_NOKEY;
|
||||
|
||||
/* Check that also referencing constraints are ok */
|
||||
dict_names_t fk_tables;
|
||||
err = dict_load_foreigns(m_table_name, nullptr,
|
||||
m_trx->id, true,
|
||||
DICT_ERR_IGNORE_NONE, fk_tables);
|
||||
ignore_err, fk_tables);
|
||||
while (err == DB_SUCCESS && !fk_tables.empty()) {
|
||||
dict_sys.load_table(
|
||||
{fk_tables.front(), strlen(fk_tables.front())},
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
ignore_err);
|
||||
fk_tables.pop_front();
|
||||
}
|
||||
}
|
||||
|
@ -13090,96 +13097,59 @@ bool create_table_info_t::row_size_is_acceptable(
|
|||
return true;
|
||||
}
|
||||
|
||||
/** Update a new table in an InnoDB database.
|
||||
@return error number */
|
||||
int
|
||||
create_table_info_t::create_table_update_dict()
|
||||
void create_table_info_t::create_table_update_dict(dict_table_t *table,
|
||||
THD *thd,
|
||||
const HA_CREATE_INFO &info,
|
||||
const TABLE &t)
|
||||
{
|
||||
dict_table_t* innobase_table;
|
||||
ut_ad(dict_sys.locked());
|
||||
|
||||
DBUG_ENTER("create_table_update_dict");
|
||||
DBUG_ASSERT(table->get_ref_count());
|
||||
if (table->fts)
|
||||
{
|
||||
if (!table->fts_doc_id_index)
|
||||
table->fts_doc_id_index=
|
||||
dict_table_get_index_on_name(table, FTS_DOC_ID_INDEX_NAME);
|
||||
else
|
||||
DBUG_ASSERT(table->fts_doc_id_index ==
|
||||
dict_table_get_index_on_name(table, FTS_DOC_ID_INDEX_NAME));
|
||||
}
|
||||
|
||||
innobase_table = dict_table_open_on_name(
|
||||
m_table_name, false, DICT_ERR_IGNORE_NONE);
|
||||
DBUG_ASSERT(!table->fts == !table->fts_doc_id_index);
|
||||
|
||||
DBUG_ASSERT(innobase_table != 0);
|
||||
if (innobase_table->fts != NULL) {
|
||||
if (innobase_table->fts_doc_id_index == NULL) {
|
||||
innobase_table->fts_doc_id_index
|
||||
= dict_table_get_index_on_name(
|
||||
innobase_table, FTS_DOC_ID_INDEX_NAME);
|
||||
DBUG_ASSERT(innobase_table->fts_doc_id_index != NULL);
|
||||
} else {
|
||||
DBUG_ASSERT(innobase_table->fts_doc_id_index
|
||||
== dict_table_get_index_on_name(
|
||||
innobase_table,
|
||||
FTS_DOC_ID_INDEX_NAME));
|
||||
}
|
||||
}
|
||||
innobase_copy_frm_flags_from_create_info(table, &info);
|
||||
|
||||
DBUG_ASSERT((innobase_table->fts == NULL)
|
||||
== (innobase_table->fts_doc_id_index == NULL));
|
||||
/* Load server stopword into FTS cache */
|
||||
if (table->flags2 & DICT_TF2_FTS &&
|
||||
innobase_fts_load_stopword(table, nullptr, thd))
|
||||
fts_optimize_add_table(table);
|
||||
|
||||
innobase_copy_frm_flags_from_create_info(innobase_table, m_create_info);
|
||||
if (const Field *ai = t.found_next_number_field)
|
||||
{
|
||||
ut_ad(ai->stored_in_db());
|
||||
ib_uint64_t autoinc= info.auto_increment_value;
|
||||
if (autoinc == 0)
|
||||
autoinc= 1;
|
||||
|
||||
dict_stats_update(innobase_table, DICT_STATS_EMPTY_TABLE);
|
||||
table->autoinc_mutex.wr_lock();
|
||||
dict_table_autoinc_initialize(table, autoinc);
|
||||
|
||||
/* Load server stopword into FTS cache */
|
||||
if (m_flags2 & DICT_TF2_FTS) {
|
||||
if (!innobase_fts_load_stopword(innobase_table, NULL, m_thd)) {
|
||||
innobase_table->release();
|
||||
DBUG_RETURN(-1);
|
||||
}
|
||||
if (!table->is_temporary())
|
||||
{
|
||||
const unsigned col_no= innodb_col_no(ai);
|
||||
table->persistent_autoinc= static_cast<uint16_t>
|
||||
(dict_table_get_nth_col_pos(table, col_no, nullptr) + 1) &
|
||||
dict_index_t::MAX_N_FIELDS;
|
||||
/* Persist the "last used" value, which typically is AUTO_INCREMENT - 1.
|
||||
In btr_create(), the value 0 was already written. */
|
||||
if (--autoinc)
|
||||
btr_write_autoinc(dict_table_get_first_index(table), autoinc);
|
||||
}
|
||||
|
||||
dict_sys.lock(SRW_LOCK_CALL);
|
||||
fts_optimize_add_table(innobase_table);
|
||||
dict_sys.unlock();
|
||||
}
|
||||
table->autoinc_mutex.wr_unlock();
|
||||
}
|
||||
|
||||
if (const Field* ai = m_form->found_next_number_field) {
|
||||
ut_ad(ai->stored_in_db());
|
||||
|
||||
ib_uint64_t autoinc = m_create_info->auto_increment_value;
|
||||
|
||||
if (autoinc == 0) {
|
||||
autoinc = 1;
|
||||
}
|
||||
|
||||
innobase_table->autoinc_mutex.wr_lock();
|
||||
dict_table_autoinc_initialize(innobase_table, autoinc);
|
||||
|
||||
if (innobase_table->is_temporary()) {
|
||||
/* AUTO_INCREMENT is not persistent for
|
||||
TEMPORARY TABLE. Temporary tables are never
|
||||
evicted. Keep the counter in memory only. */
|
||||
} else {
|
||||
const unsigned col_no = innodb_col_no(ai);
|
||||
|
||||
innobase_table->persistent_autoinc
|
||||
= static_cast<uint16_t>(
|
||||
dict_table_get_nth_col_pos(
|
||||
innobase_table, col_no, NULL)
|
||||
+ 1)
|
||||
& dict_index_t::MAX_N_FIELDS;
|
||||
|
||||
/* Persist the "last used" value, which
|
||||
typically is AUTO_INCREMENT - 1.
|
||||
In btr_create(), the value 0 was already written. */
|
||||
if (--autoinc) {
|
||||
btr_write_autoinc(
|
||||
dict_table_get_first_index(
|
||||
innobase_table),
|
||||
autoinc);
|
||||
}
|
||||
}
|
||||
|
||||
innobase_table->autoinc_mutex.wr_unlock();
|
||||
}
|
||||
|
||||
innobase_parse_hint_from_comment(m_thd, innobase_table, m_form->s);
|
||||
|
||||
dict_table_close(innobase_table);
|
||||
DBUG_RETURN(0);
|
||||
innobase_parse_hint_from_comment(thd, table, t.s);
|
||||
}
|
||||
|
||||
/** Allocate a new trx. */
|
||||
|
@ -13196,89 +13166,80 @@ create_table_info_t::allocate_trx()
|
|||
@param[in] create_info Create info (including create statement string).
|
||||
@param[in] file_per_table whether to create .ibd file
|
||||
@param[in,out] trx dictionary transaction, or NULL to create new
|
||||
@return 0 if success else error number. */
|
||||
inline int
|
||||
ha_innobase::create(
|
||||
const char* name,
|
||||
TABLE* form,
|
||||
HA_CREATE_INFO* create_info,
|
||||
bool file_per_table,
|
||||
trx_t* trx)
|
||||
@return error code
|
||||
@retval 0 on success */
|
||||
int
|
||||
ha_innobase::create(const char *name, TABLE *form, HA_CREATE_INFO *create_info,
|
||||
bool file_per_table, trx_t *trx= nullptr)
|
||||
{
|
||||
char norm_name[FN_REFLEN]; /* {database}/{tablename} */
|
||||
char remote_path[FN_REFLEN]; /* Absolute path of table */
|
||||
char norm_name[FN_REFLEN]; /* {database}/{tablename} */
|
||||
char remote_path[FN_REFLEN]; /* Absolute path of table */
|
||||
|
||||
DBUG_ENTER("ha_innobase::create");
|
||||
DBUG_ENTER("ha_innobase::create");
|
||||
DBUG_ASSERT(form->s == table_share);
|
||||
DBUG_ASSERT(table_share->table_type == TABLE_TYPE_SEQUENCE ||
|
||||
table_share->table_type == TABLE_TYPE_NORMAL);
|
||||
|
||||
DBUG_ASSERT(form->s == table_share);
|
||||
DBUG_ASSERT(table_share->table_type == TABLE_TYPE_SEQUENCE
|
||||
|| table_share->table_type == TABLE_TYPE_NORMAL);
|
||||
create_table_info_t info(ha_thd(), form, create_info, norm_name,
|
||||
remote_path, file_per_table, trx);
|
||||
|
||||
create_table_info_t info(ha_thd(),
|
||||
form,
|
||||
create_info,
|
||||
norm_name,
|
||||
remote_path,
|
||||
file_per_table, trx);
|
||||
int error= info.initialize();
|
||||
if (!error)
|
||||
error= info.prepare_create_table(name, !trx);
|
||||
if (error)
|
||||
DBUG_RETURN(error);
|
||||
|
||||
{
|
||||
int error = info.initialize();
|
||||
if (!error) {
|
||||
error = info.prepare_create_table(name, !trx);
|
||||
}
|
||||
if (error) {
|
||||
if (trx) {
|
||||
trx_rollback_for_mysql(trx);
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
}
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
}
|
||||
const bool own_trx= !trx;
|
||||
if (own_trx)
|
||||
{
|
||||
info.allocate_trx();
|
||||
trx= info.trx();
|
||||
DBUG_ASSERT(trx_state_eq(trx, TRX_STATE_NOT_STARTED));
|
||||
|
||||
const bool own_trx = !trx;
|
||||
int error = 0;
|
||||
if (!(info.flags2() & DICT_TF2_TEMPORARY))
|
||||
{
|
||||
trx_start_for_ddl(trx);
|
||||
if (dberr_t err= lock_sys_tables(trx))
|
||||
error= convert_error_code_to_mysql(err, 0, nullptr);
|
||||
}
|
||||
row_mysql_lock_data_dictionary(trx);
|
||||
}
|
||||
|
||||
if (own_trx) {
|
||||
info.allocate_trx();
|
||||
trx = info.trx();
|
||||
DBUG_ASSERT(trx_state_eq(trx, TRX_STATE_NOT_STARTED));
|
||||
}
|
||||
if (own_trx && !(info.flags2() & DICT_TF2_TEMPORARY)) {
|
||||
trx_start_for_ddl(trx);
|
||||
if (dberr_t err = lock_sys_tables(trx)) {
|
||||
error = convert_error_code_to_mysql(err, 0, nullptr);
|
||||
}
|
||||
}
|
||||
if (own_trx) {
|
||||
row_mysql_lock_data_dictionary(trx);
|
||||
}
|
||||
if (!error)
|
||||
error= info.create_table(own_trx);
|
||||
|
||||
if (!error) {
|
||||
error = info.create_table(own_trx);
|
||||
}
|
||||
if (own_trx || (info.flags2() & DICT_TF2_TEMPORARY))
|
||||
{
|
||||
if (error)
|
||||
trx_rollback_for_mysql(trx);
|
||||
else
|
||||
{
|
||||
std::vector<pfs_os_file_t> deleted;
|
||||
trx->commit(deleted);
|
||||
ut_ad(deleted.empty());
|
||||
info.table()->acquire();
|
||||
info.create_table_update_dict(info.table(), info.thd(),
|
||||
*create_info, *form);
|
||||
}
|
||||
|
||||
if (error) {
|
||||
/* Rollback will drop the being-created table. */
|
||||
trx_rollback_for_mysql(trx);
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
} else {
|
||||
/* When this is invoked as part of ha_innobase::truncate(),
|
||||
the old copy of the table will be deleted here. */
|
||||
std::vector<pfs_os_file_t> deleted;
|
||||
trx->commit(deleted);
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
for (pfs_os_file_t d : deleted) os_file_close(d);
|
||||
error = info.create_table_update_dict();
|
||||
if (!(info.flags2() & DICT_TF2_TEMPORARY)) {
|
||||
log_write_up_to(trx->commit_lsn, true);
|
||||
}
|
||||
}
|
||||
if (own_trx)
|
||||
{
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
|
||||
if (own_trx) {
|
||||
trx->free();
|
||||
}
|
||||
if (!error)
|
||||
{
|
||||
dict_stats_update(info.table(), DICT_STATS_EMPTY_TABLE);
|
||||
if (!info.table()->is_temporary())
|
||||
log_write_up_to(trx->commit_lsn, true);
|
||||
info.table()->release();
|
||||
}
|
||||
trx->free();
|
||||
}
|
||||
}
|
||||
else if (!error && m_prebuilt)
|
||||
m_prebuilt->table= info.table();
|
||||
|
||||
DBUG_RETURN(error);
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
/** Create a new table to an InnoDB database.
|
||||
|
@ -13286,13 +13247,10 @@ ha_innobase::create(
|
|||
@param[in] form Table format; columns and index information.
|
||||
@param[in] create_info Create info (including create statement string).
|
||||
@return 0 if success else error number. */
|
||||
int
|
||||
ha_innobase::create(
|
||||
const char* name,
|
||||
TABLE* form,
|
||||
HA_CREATE_INFO* create_info)
|
||||
int ha_innobase::create(const char *name, TABLE *form,
|
||||
HA_CREATE_INFO *create_info)
|
||||
{
|
||||
return create(name, form, create_info, srv_file_per_table);
|
||||
return create(name, form, create_info, srv_file_per_table);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
|
@ -13821,229 +13779,247 @@ static dberr_t innobase_rename_table(trx_t *trx, const char *from,
|
|||
@retval 0 on success */
|
||||
int ha_innobase::truncate()
|
||||
{
|
||||
DBUG_ENTER("ha_innobase::truncate");
|
||||
DBUG_ENTER("ha_innobase::truncate");
|
||||
|
||||
update_thd();
|
||||
update_thd();
|
||||
|
||||
if (is_read_only()) {
|
||||
DBUG_RETURN(HA_ERR_TABLE_READONLY);
|
||||
}
|
||||
if (is_read_only())
|
||||
DBUG_RETURN(HA_ERR_TABLE_READONLY);
|
||||
|
||||
HA_CREATE_INFO info;
|
||||
dict_table_t* ib_table = m_prebuilt->table;
|
||||
info.init();
|
||||
update_create_info_from_table(&info, table);
|
||||
switch (dict_tf_get_rec_format(ib_table->flags)) {
|
||||
case REC_FORMAT_REDUNDANT:
|
||||
info.row_type = ROW_TYPE_REDUNDANT;
|
||||
break;
|
||||
case REC_FORMAT_COMPACT:
|
||||
info.row_type = ROW_TYPE_COMPACT;
|
||||
break;
|
||||
case REC_FORMAT_COMPRESSED:
|
||||
info.row_type = ROW_TYPE_COMPRESSED;
|
||||
break;
|
||||
case REC_FORMAT_DYNAMIC:
|
||||
info.row_type = ROW_TYPE_DYNAMIC;
|
||||
break;
|
||||
}
|
||||
HA_CREATE_INFO info;
|
||||
dict_table_t *ib_table= m_prebuilt->table;
|
||||
info.init();
|
||||
update_create_info_from_table(&info, table);
|
||||
switch (dict_tf_get_rec_format(ib_table->flags)) {
|
||||
case REC_FORMAT_REDUNDANT:
|
||||
info.row_type= ROW_TYPE_REDUNDANT;
|
||||
break;
|
||||
case REC_FORMAT_COMPACT:
|
||||
info.row_type= ROW_TYPE_COMPACT;
|
||||
break;
|
||||
case REC_FORMAT_COMPRESSED:
|
||||
info.row_type= ROW_TYPE_COMPRESSED;
|
||||
break;
|
||||
case REC_FORMAT_DYNAMIC:
|
||||
info.row_type= ROW_TYPE_DYNAMIC;
|
||||
break;
|
||||
}
|
||||
|
||||
const auto stored_lock = m_prebuilt->stored_select_lock_type;
|
||||
trx_t* trx = innobase_trx_allocate(m_user_thd);
|
||||
trx_start_for_ddl(trx);
|
||||
const auto stored_lock= m_prebuilt->stored_select_lock_type;
|
||||
trx_t *trx= innobase_trx_allocate(m_user_thd);
|
||||
trx_start_for_ddl(trx);
|
||||
|
||||
if (ib_table->is_temporary()) {
|
||||
info.options|= HA_LEX_CREATE_TMP_TABLE;
|
||||
btr_drop_temporary_table(*ib_table);
|
||||
m_prebuilt->table = nullptr;
|
||||
row_prebuilt_free(m_prebuilt);
|
||||
m_prebuilt = nullptr;
|
||||
my_free(m_upd_buf);
|
||||
m_upd_buf = nullptr;
|
||||
m_upd_buf_size = 0;
|
||||
if (ib_table->is_temporary())
|
||||
{
|
||||
info.options|= HA_LEX_CREATE_TMP_TABLE;
|
||||
btr_drop_temporary_table(*ib_table);
|
||||
m_prebuilt->table= nullptr;
|
||||
row_prebuilt_free(m_prebuilt);
|
||||
m_prebuilt= nullptr;
|
||||
my_free(m_upd_buf);
|
||||
m_upd_buf= nullptr;
|
||||
m_upd_buf_size= 0;
|
||||
|
||||
row_mysql_lock_data_dictionary(trx);
|
||||
ib_table->release();
|
||||
dict_sys.remove(ib_table, false, true);
|
||||
row_mysql_lock_data_dictionary(trx);
|
||||
ib_table->release();
|
||||
dict_sys.remove(ib_table, false, true);
|
||||
int err= create(ib_table->name.m_name, table, &info, true, trx);
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
|
||||
int err = create(ib_table->name.m_name, table, &info, true,
|
||||
trx);
|
||||
ut_ad(!err);
|
||||
if (!err) {
|
||||
err = open(ib_table->name.m_name, 0, 0);
|
||||
m_prebuilt->stored_select_lock_type = stored_lock;
|
||||
}
|
||||
ut_ad(!err);
|
||||
if (!err)
|
||||
{
|
||||
err= open(ib_table->name.m_name, 0, 0);
|
||||
m_prebuilt->table->release();
|
||||
m_prebuilt->stored_select_lock_type= stored_lock;
|
||||
}
|
||||
|
||||
trx->free();
|
||||
trx->free();
|
||||
|
||||
#ifdef BTR_CUR_HASH_ADAPT
|
||||
if (UT_LIST_GET_LEN(ib_table->freed_indexes)) {
|
||||
ib_table->vc_templ = nullptr;
|
||||
ib_table->id = 0;
|
||||
DBUG_RETURN(err);
|
||||
}
|
||||
if (UT_LIST_GET_LEN(ib_table->freed_indexes))
|
||||
{
|
||||
ib_table->vc_templ= nullptr;
|
||||
ib_table->id= 0;
|
||||
}
|
||||
else
|
||||
#endif /* BTR_CUR_HASH_ADAPT */
|
||||
dict_mem_table_free(ib_table);
|
||||
|
||||
dict_mem_table_free(ib_table);
|
||||
DBUG_RETURN(err);
|
||||
}
|
||||
DBUG_RETURN(err);
|
||||
}
|
||||
|
||||
mem_heap_t* heap = mem_heap_create(1000);
|
||||
mem_heap_t *heap= mem_heap_create(1000);
|
||||
|
||||
dict_get_and_save_data_dir_path(ib_table);
|
||||
info.data_file_name = ib_table->data_dir_path;
|
||||
const char* temp_name = dict_mem_create_temporary_tablename(
|
||||
heap, ib_table->name.m_name, ib_table->id);
|
||||
const char* name = mem_heap_strdup(heap, ib_table->name.m_name);
|
||||
dict_get_and_save_data_dir_path(ib_table);
|
||||
info.data_file_name= ib_table->data_dir_path;
|
||||
const char *temp_name=
|
||||
dict_mem_create_temporary_tablename(heap,
|
||||
ib_table->name.m_name, ib_table->id);
|
||||
const char *name= mem_heap_strdup(heap, ib_table->name.m_name);
|
||||
|
||||
dict_table_t *table_stats = nullptr, *index_stats = nullptr;
|
||||
MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr;
|
||||
dict_table_t *table_stats = nullptr, *index_stats = nullptr;
|
||||
MDL_ticket *mdl_table = nullptr, *mdl_index = nullptr;
|
||||
|
||||
dberr_t error = DB_SUCCESS;
|
||||
dberr_t error= DB_SUCCESS;
|
||||
|
||||
dict_sys.freeze(SRW_LOCK_CALL);
|
||||
for (const dict_foreign_t* f : ib_table->referenced_set) {
|
||||
if (dict_table_t* child = f->foreign_table) {
|
||||
error = lock_table_for_trx(child, trx, LOCK_X);
|
||||
if (error != DB_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
dict_sys.unfreeze();
|
||||
dict_sys.freeze(SRW_LOCK_CALL);
|
||||
for (const dict_foreign_t *f : ib_table->referenced_set)
|
||||
if (dict_table_t *child= f->foreign_table)
|
||||
if ((error= lock_table_for_trx(child, trx, LOCK_X)) != DB_SUCCESS)
|
||||
break;
|
||||
dict_sys.unfreeze();
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
error = lock_table_for_trx(ib_table, trx, LOCK_X);
|
||||
}
|
||||
if (error == DB_SUCCESS)
|
||||
error= lock_table_for_trx(ib_table, trx, LOCK_X);
|
||||
|
||||
const bool fts = error == DB_SUCCESS
|
||||
&& ib_table->flags2 & (DICT_TF2_FTS_HAS_DOC_ID | DICT_TF2_FTS);
|
||||
const bool fts= error == DB_SUCCESS &&
|
||||
ib_table->flags2 & (DICT_TF2_FTS_HAS_DOC_ID | DICT_TF2_FTS);
|
||||
|
||||
if (fts) {
|
||||
fts_optimize_remove_table(ib_table);
|
||||
purge_sys.stop_FTS(*ib_table);
|
||||
error = fts_lock_tables(trx, *ib_table);
|
||||
}
|
||||
if (fts)
|
||||
{
|
||||
fts_optimize_remove_table(ib_table);
|
||||
purge_sys.stop_FTS(*ib_table);
|
||||
error= fts_lock_tables(trx, *ib_table);
|
||||
}
|
||||
|
||||
/* Wait for purge threads to stop using the table. */
|
||||
for (uint n = 15; ib_table->get_ref_count() > 1; ) {
|
||||
if (!--n) {
|
||||
error = DB_LOCK_WAIT_TIMEOUT;
|
||||
break;
|
||||
}
|
||||
/* Wait for purge threads to stop using the table. */
|
||||
for (uint n = 15; ib_table->get_ref_count() > 1; )
|
||||
{
|
||||
if (!--n)
|
||||
{
|
||||
error= DB_LOCK_WAIT_TIMEOUT;
|
||||
break;
|
||||
}
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(50));
|
||||
}
|
||||
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(50));
|
||||
}
|
||||
if (error == DB_SUCCESS && dict_stats_is_persistent_enabled(ib_table) &&
|
||||
!ib_table->is_stats_table())
|
||||
{
|
||||
table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
if (table_stats)
|
||||
{
|
||||
dict_sys.freeze(SRW_LOCK_CALL);
|
||||
table_stats= dict_acquire_mdl_shared<false>(table_stats, m_user_thd,
|
||||
&mdl_table);
|
||||
dict_sys.unfreeze();
|
||||
}
|
||||
index_stats= dict_table_open_on_name(INDEX_STATS_NAME, false,
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
if (index_stats)
|
||||
{
|
||||
dict_sys.freeze(SRW_LOCK_CALL);
|
||||
index_stats= dict_acquire_mdl_shared<false>(index_stats, m_user_thd,
|
||||
&mdl_index);
|
||||
dict_sys.unfreeze();
|
||||
}
|
||||
|
||||
if (error == DB_SUCCESS && dict_stats_is_persistent_enabled(ib_table)
|
||||
&& !ib_table->is_stats_table()) {
|
||||
table_stats= dict_table_open_on_name(TABLE_STATS_NAME, false,
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
if (table_stats) {
|
||||
dict_sys.freeze(SRW_LOCK_CALL);
|
||||
table_stats = dict_acquire_mdl_shared<false>(
|
||||
table_stats, m_user_thd, &mdl_table);
|
||||
dict_sys.unfreeze();
|
||||
}
|
||||
index_stats = dict_table_open_on_name(INDEX_STATS_NAME, false,
|
||||
DICT_ERR_IGNORE_NONE);
|
||||
if (index_stats) {
|
||||
dict_sys.freeze(SRW_LOCK_CALL);
|
||||
index_stats = dict_acquire_mdl_shared<false>(
|
||||
index_stats, m_user_thd, &mdl_index);
|
||||
dict_sys.unfreeze();
|
||||
}
|
||||
if (table_stats && index_stats &&
|
||||
!strcmp(table_stats->name.m_name, TABLE_STATS_NAME) &&
|
||||
!strcmp(index_stats->name.m_name, INDEX_STATS_NAME) &&
|
||||
!(error= lock_table_for_trx(table_stats, trx, LOCK_X)))
|
||||
error= lock_table_for_trx(index_stats, trx, LOCK_X);
|
||||
}
|
||||
|
||||
if (table_stats && index_stats
|
||||
&& !strcmp(table_stats->name.m_name, TABLE_STATS_NAME)
|
||||
&& !strcmp(index_stats->name.m_name, INDEX_STATS_NAME) &&
|
||||
!(error = lock_table_for_trx(table_stats, trx, LOCK_X))) {
|
||||
error = lock_table_for_trx(index_stats, trx, LOCK_X);
|
||||
}
|
||||
}
|
||||
if (error == DB_SUCCESS)
|
||||
error= lock_sys_tables(trx);
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
error = lock_sys_tables(trx);
|
||||
}
|
||||
std::vector<pfs_os_file_t> deleted;
|
||||
|
||||
row_mysql_lock_data_dictionary(trx);
|
||||
row_mysql_lock_data_dictionary(trx);
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
error = innobase_rename_table(trx, ib_table->name.m_name,
|
||||
temp_name, false);
|
||||
if (error == DB_SUCCESS)
|
||||
{
|
||||
error= innobase_rename_table(trx, ib_table->name.m_name, temp_name, false);
|
||||
if (error == DB_SUCCESS)
|
||||
error= trx->drop_table(*ib_table);
|
||||
}
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
error = trx->drop_table(*ib_table);
|
||||
}
|
||||
}
|
||||
int err = convert_error_code_to_mysql(error, ib_table->flags, m_user_thd);
|
||||
const auto update_time = ib_table->update_time;
|
||||
|
||||
int err = convert_error_code_to_mysql(error, ib_table->flags,
|
||||
m_user_thd);
|
||||
if (err) {
|
||||
trx_rollback_for_mysql(trx);
|
||||
if (fts) {
|
||||
fts_optimize_add_table(ib_table);
|
||||
purge_sys.resume_FTS();
|
||||
}
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
} else {
|
||||
const auto update_time = ib_table->update_time;
|
||||
const auto stored_lock = m_prebuilt->stored_select_lock_type;
|
||||
const auto def_trx_id = ib_table->def_trx_id;
|
||||
ib_table->release();
|
||||
m_prebuilt->table = nullptr;
|
||||
if (err)
|
||||
{
|
||||
trx_rollback_for_mysql(trx);
|
||||
if (fts)
|
||||
fts_optimize_add_table(ib_table);
|
||||
}
|
||||
else
|
||||
{
|
||||
const auto def_trx_id= ib_table->def_trx_id;
|
||||
ib_table->release();
|
||||
m_prebuilt->table= nullptr;
|
||||
|
||||
err = create(name, table, &info,
|
||||
dict_table_is_file_per_table(ib_table), trx);
|
||||
/* On success, create() durably committed trx. */
|
||||
if (fts) {
|
||||
purge_sys.resume_FTS();
|
||||
}
|
||||
err= create(name, table, &info, dict_table_is_file_per_table(ib_table),
|
||||
trx);
|
||||
if (!err)
|
||||
{
|
||||
m_prebuilt->table->acquire();
|
||||
create_table_info_t::create_table_update_dict(m_prebuilt->table,
|
||||
m_user_thd, info, *table);
|
||||
trx->commit(deleted);
|
||||
}
|
||||
else
|
||||
{
|
||||
trx_rollback_for_mysql(trx);
|
||||
m_prebuilt->table= dict_table_open_on_name(name, true,
|
||||
DICT_ERR_IGNORE_FK_NOKEY);
|
||||
m_prebuilt->table->def_trx_id= def_trx_id;
|
||||
}
|
||||
dict_names_t fk_tables;
|
||||
dict_load_foreigns(m_prebuilt->table->name.m_name, nullptr, 1, true,
|
||||
DICT_ERR_IGNORE_FK_NOKEY, fk_tables);
|
||||
for (const char *f : fk_tables)
|
||||
dict_sys.load_table({f, strlen(f)});
|
||||
}
|
||||
|
||||
if (err) {
|
||||
reload:
|
||||
m_prebuilt->table = dict_table_open_on_name(
|
||||
name, false, DICT_ERR_IGNORE_NONE);
|
||||
m_prebuilt->table->def_trx_id = def_trx_id;
|
||||
} else {
|
||||
row_prebuilt_t* prebuilt = m_prebuilt;
|
||||
uchar* upd_buf = m_upd_buf;
|
||||
ulint upd_buf_size = m_upd_buf_size;
|
||||
/* Mimic ha_innobase::close(). */
|
||||
m_prebuilt = nullptr;
|
||||
m_upd_buf = nullptr;
|
||||
m_upd_buf_size = 0;
|
||||
if (fts)
|
||||
purge_sys.resume_FTS();
|
||||
|
||||
err = open(name, 0, 0);
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
for (pfs_os_file_t d : deleted) os_file_close(d);
|
||||
|
||||
if (!err) {
|
||||
m_prebuilt->stored_select_lock_type
|
||||
= stored_lock;
|
||||
m_prebuilt->table->update_time = update_time;
|
||||
row_prebuilt_free(prebuilt);
|
||||
my_free(upd_buf);
|
||||
} else {
|
||||
/* Revert to the old table. */
|
||||
m_prebuilt = prebuilt;
|
||||
m_upd_buf = upd_buf;
|
||||
m_upd_buf_size = upd_buf_size;
|
||||
goto reload;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!err)
|
||||
{
|
||||
dict_stats_update(m_prebuilt->table, DICT_STATS_EMPTY_TABLE);
|
||||
log_write_up_to(trx->commit_lsn, true);
|
||||
row_prebuilt_t *prebuilt= m_prebuilt;
|
||||
uchar *upd_buf= m_upd_buf;
|
||||
ulint upd_buf_size= m_upd_buf_size;
|
||||
/* Mimic ha_innobase::close(). */
|
||||
m_prebuilt= nullptr;
|
||||
m_upd_buf= nullptr;
|
||||
m_upd_buf_size= 0;
|
||||
|
||||
trx->free();
|
||||
err= open(name, 0, 0);
|
||||
if (!err)
|
||||
{
|
||||
m_prebuilt->stored_select_lock_type= stored_lock;
|
||||
m_prebuilt->table->update_time= update_time;
|
||||
row_prebuilt_free(prebuilt);
|
||||
my_free(upd_buf);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Revert to the old table. */
|
||||
m_prebuilt= prebuilt;
|
||||
m_upd_buf= upd_buf;
|
||||
m_upd_buf_size= upd_buf_size;
|
||||
}
|
||||
}
|
||||
|
||||
mem_heap_free(heap);
|
||||
trx->free();
|
||||
|
||||
if (table_stats) {
|
||||
dict_table_close(table_stats, false, m_user_thd, mdl_table);
|
||||
}
|
||||
if (index_stats) {
|
||||
dict_table_close(index_stats, false, m_user_thd, mdl_index);
|
||||
}
|
||||
mem_heap_free(heap);
|
||||
|
||||
DBUG_RETURN(err);
|
||||
if (table_stats)
|
||||
dict_table_close(table_stats, false, m_user_thd, mdl_table);
|
||||
if (index_stats)
|
||||
dict_table_close(index_stats, false, m_user_thd, mdl_index);
|
||||
|
||||
DBUG_RETURN(err);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
|
@ -15637,30 +15613,12 @@ delete is then allowed internally to resolve a duplicate key conflict in
|
|||
REPLACE, not an update.
|
||||
@return > 0 if referenced by a FOREIGN KEY */
|
||||
|
||||
uint
|
||||
ha_innobase::referenced_by_foreign_key(void)
|
||||
/*========================================*/
|
||||
uint ha_innobase::referenced_by_foreign_key()
|
||||
{
|
||||
if (dict_table_is_referenced_by_foreign_key(m_prebuilt->table)) {
|
||||
|
||||
return(1);
|
||||
}
|
||||
|
||||
return(0);
|
||||
}
|
||||
|
||||
/*******************************************************************//**
|
||||
Frees the foreign key create info for a table stored in InnoDB, if it is
|
||||
non-NULL. */
|
||||
|
||||
void
|
||||
ha_innobase::free_foreign_key_create_info(
|
||||
/*======================================*/
|
||||
char* str) /*!< in, own: create info string to free */
|
||||
{
|
||||
if (str != NULL) {
|
||||
my_free(str);
|
||||
}
|
||||
dict_sys.freeze(SRW_LOCK_CALL);
|
||||
const bool empty= m_prebuilt->table->referenced_set.empty();
|
||||
dict_sys.unfreeze();
|
||||
return !empty;
|
||||
}
|
||||
|
||||
/*******************************************************************//**
|
||||
|
@@ -20639,6 +20597,26 @@ bool ha_innobase::can_convert_blob(const Field_blob *field,
  return true;
}


bool ha_innobase::can_convert_nocopy(const Field &field,
                                     const Column_definition &new_type) const
{
  if (const Field_string *tf= dynamic_cast<const Field_string *>(&field))
    return can_convert_string(tf, new_type);

  if (const Field_varstring *tf= dynamic_cast<const Field_varstring *>(&field))
    return can_convert_varstring(tf, new_type);

  if (dynamic_cast<const Field_geom *>(&field))
    return false;

  if (const Field_blob *tf= dynamic_cast<const Field_blob *>(&field))
    return can_convert_blob(tf, new_type);

  return false;
}
Compare_keys ha_innobase::compare_key_parts(
|
||||
const Field &old_field, const Column_definition &new_field,
|
||||
const KEY_PART_INFO &old_part, const KEY_PART_INFO &new_part) const
|
||||
|
@ -20649,7 +20627,7 @@ Compare_keys ha_innobase::compare_key_parts(
|
|||
|
||||
if (!is_equal)
|
||||
{
|
||||
if (!old_field.can_be_converted_by_engine(new_field))
|
||||
if (!old_field.table->file->can_convert_nocopy(old_field, new_field))
|
||||
return Compare_keys::NotEqual;
|
||||
|
||||
if (!Charset(old_cs).eq_collation_specific_names(new_cs))
|
||||
|
|
|
@ -190,12 +190,12 @@ public:
|
|||
|
||||
void update_create_info(HA_CREATE_INFO* create_info) override;
|
||||
|
||||
inline int create(
|
||||
int create(
|
||||
const char* name,
|
||||
TABLE* form,
|
||||
HA_CREATE_INFO* create_info,
|
||||
bool file_per_table,
|
||||
trx_t* trx = NULL);
|
||||
trx_t* trx);
|
||||
|
||||
int create(
|
||||
const char* name,
|
||||
|
@ -225,7 +225,7 @@ public:
|
|||
|
||||
uint referenced_by_foreign_key() override;
|
||||
|
||||
void free_foreign_key_create_info(char* str) override;
|
||||
void free_foreign_key_create_info(char* str) override { my_free(str); }
|
||||
|
||||
uint lock_count(void) const override;
|
||||
|
||||
|
@ -422,15 +422,9 @@ public:
|
|||
@retval false if pushed (always) */
|
||||
bool rowid_filter_push(Rowid_filter *rowid_filter) override;
|
||||
|
||||
bool
|
||||
can_convert_string(const Field_string* field,
|
||||
const Column_definition& new_field) const override;
|
||||
bool can_convert_varstring(
|
||||
const Field_varstring* field,
|
||||
const Column_definition& new_field) const override;
|
||||
bool
|
||||
can_convert_blob(const Field_blob* field,
|
||||
const Column_definition& new_field) const override;
|
||||
bool can_convert_nocopy(const Field &field,
|
||||
const Column_definition& new_field) const
|
||||
override;
|
||||
|
||||
/** @return whether innodb_strict_mode is active */
|
||||
static bool is_innodb_strict_mode(THD* thd);
|
||||
|
@ -445,6 +439,16 @@ public:
|
|||
const KEY_PART_INFO& new_part) const override;
|
||||
|
||||
protected:
|
||||
bool
|
||||
can_convert_string(const Field_string* field,
|
||||
const Column_definition& new_field) const;
|
||||
bool can_convert_varstring(
|
||||
const Field_varstring* field,
|
||||
const Column_definition& new_field) const;
|
||||
bool
|
||||
can_convert_blob(const Field_blob* field,
|
||||
const Column_definition& new_field) const;
|
||||
|
||||
dberr_t innobase_get_autoinc(ulonglong* value);
|
||||
dberr_t innobase_lock_autoinc();
|
||||
ulonglong innobase_peek_autoinc();
|
||||
|
@ -639,8 +643,9 @@ public:
|
|||
@param create_fk whether to add FOREIGN KEY constraints */
|
||||
int create_table(bool create_fk = true);
|
||||
|
||||
/** Update the internal data dictionary. */
|
||||
int create_table_update_dict();
|
||||
static void create_table_update_dict(dict_table_t* table, THD* thd,
|
||||
const HA_CREATE_INFO& info,
|
||||
const TABLE& t);
|
||||
|
||||
/** Validates the create options. Checks that the options
|
||||
KEY_BLOCK_SIZE, ROW_FORMAT, DATA DIRECTORY, TEMPORARY & TABLESPACE
|
||||
|
@ -700,12 +705,13 @@ public:
|
|||
trx_t* trx() const
|
||||
{ return(m_trx); }
|
||||
|
||||
/** Return table name. */
|
||||
const char* table_name() const
|
||||
{ return(m_table_name); }
|
||||
/** @return table name */
|
||||
const char* table_name() const { return(m_table_name); }
|
||||
|
||||
THD* thd() const
|
||||
{ return(m_thd); }
|
||||
/** @return the created table */
|
||||
dict_table_t *table() const { return m_table; }
|
||||
|
||||
THD* thd() const { return(m_thd); }
|
||||
|
||||
private:
|
||||
/** Parses the table name into normal name and either temp path or
|
||||
|
|
|
@ -7286,13 +7286,10 @@ error_handling_drop_uncached:
|
|||
goto error_handling;
|
||||
}
|
||||
|
||||
ctx->new_table->fts->dict_locked = true;
|
||||
|
||||
error = innobase_fts_load_stopword(
|
||||
ctx->new_table, ctx->trx,
|
||||
ctx->prebuilt->trx->mysql_thd)
|
||||
? DB_SUCCESS : DB_ERROR;
|
||||
ctx->new_table->fts->dict_locked = false;
|
||||
|
||||
if (error != DB_SUCCESS) {
|
||||
goto error_handling;
|
||||
|
@ -9882,7 +9879,7 @@ innobase_update_foreign_cache(
|
|||
|
||||
err = dict_load_foreigns(user_table->name.m_name,
|
||||
ctx->col_names, 1, true,
|
||||
DICT_ERR_IGNORE_NONE,
|
||||
DICT_ERR_IGNORE_FK_NOKEY,
|
||||
fk_tables);
|
||||
|
||||
if (err == DB_CANNOT_ADD_CONSTRAINT) {
|
||||
|
|
|
@ -6491,7 +6491,9 @@ static int i_s_sys_tablespaces_fill_table(THD *thd, TABLE_LIST *tables, Item*)
|
|||
{
|
||||
space.reacquire();
|
||||
mysql_mutex_unlock(&fil_system.mutex);
|
||||
space.s_lock();
|
||||
err= i_s_sys_tablespaces_fill(thd, space, tables->table);
|
||||
space.s_unlock();
|
||||
mysql_mutex_lock(&fil_system.mutex);
|
||||
space.release();
|
||||
if (err)
|
||||
|
@ -6719,8 +6721,10 @@ i_s_tablespaces_encryption_fill_table(
|
|||
&& !space.is_stopping()) {
|
||||
space.reacquire();
|
||||
mysql_mutex_unlock(&fil_system.mutex);
|
||||
space.s_lock();
|
||||
err = i_s_dict_fill_tablespaces_encryption(
|
||||
thd, &space, tables->table);
|
||||
space.s_unlock();
|
||||
mysql_mutex_lock(&fil_system.mutex);
|
||||
space.release();
|
||||
if (err) {
|
||||
|
|
|
@ -2464,6 +2464,7 @@ ibuf_merge_space(
|
|||
|
||||
ut_ad(space < SRV_SPACE_ID_UPPER_BOUND);
|
||||
|
||||
log_free_check();
|
||||
ibuf_mtr_start(&mtr);
|
||||
|
||||
/* Position the cursor on the first matching record. */
|
||||
|
@ -2566,6 +2567,7 @@ ulint ibuf_merge_all()
|
|||
ulint n_pages = srv_io_capacity;
|
||||
|
||||
for (ulint sum_pages = 0; sum_pages < n_pages; ) {
|
||||
log_free_check();
|
||||
ulint n_pag2;
|
||||
ulint n_bytes = ibuf_merge(&n_pag2);
|
||||
|
||||
|
@ -4461,7 +4463,7 @@ reset_bit:
|
|||
}
|
||||
|
||||
/** Delete all change buffer entries for a tablespace,
|
||||
in DISCARD TABLESPACE, IMPORT TABLESPACE, or crash recovery.
|
||||
in DISCARD TABLESPACE, IMPORT TABLESPACE, or read-ahead.
|
||||
@param[in] space missing or to-be-discarded tablespace */
|
||||
void ibuf_delete_for_discarded_space(uint32_t space)
|
||||
{
|
||||
|
@ -4483,6 +4485,7 @@ void ibuf_delete_for_discarded_space(uint32_t space)
|
|||
|
||||
memset(dops, 0, sizeof(dops));
|
||||
loop:
|
||||
log_free_check();
|
||||
ibuf_mtr_start(&mtr);
|
||||
|
||||
/* Position pcur in the insert buffer at the first entry for the
|
||||
|
@ -4622,9 +4625,6 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
|
|||
}
|
||||
|
||||
mtr_start(&mtr);
|
||||
|
||||
mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
|
||||
|
||||
ibuf_enter(&mtr);
|
||||
|
||||
buf_block_t* bitmap_page = ibuf_bitmap_get_map_page(
|
||||
|
@ -4712,29 +4712,18 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
|
|||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
/** Updates free bits and buffered bits for bulk loaded page.
|
||||
@param[in] block index page
|
||||
@param[in] reset flag if reset free val */
|
||||
void
|
||||
ibuf_set_bitmap_for_bulk_load(
|
||||
buf_block_t* block,
|
||||
bool reset)
|
||||
void ibuf_set_bitmap_for_bulk_load(buf_block_t *block, mtr_t *mtr, bool reset)
|
||||
{
|
||||
mtr_t mtr;
|
||||
|
||||
ut_a(page_is_leaf(block->page.frame));
|
||||
mtr.start();
|
||||
fil_space_t *space= mtr.set_named_space_id(block->page.id().space());
|
||||
|
||||
if (buf_block_t *bitmap_page=
|
||||
ibuf_bitmap_get_map_page(block->page.id(), space->zip_size(), &mtr))
|
||||
ibuf_bitmap_get_map_page(block->page.id(), block->zip_size(), mtr))
|
||||
{
|
||||
ulint free_val= reset ? 0 : ibuf_index_page_calc_free(block);
|
||||
/* FIXME: update the bitmap byte only once! */
|
||||
ibuf_bitmap_page_set_bits<IBUF_BITMAP_FREE>
|
||||
(bitmap_page, block->page.id(), block->physical_size(), free_val, &mtr);
|
||||
(bitmap_page, block->page.id(), block->physical_size(), free_val, mtr);
|
||||
ibuf_bitmap_page_set_bits<IBUF_BITMAP_BUFFERED>
|
||||
(bitmap_page, block->page.id(), block->physical_size(), false, &mtr);
|
||||
(bitmap_page, block->page.id(), block->physical_size(), false, mtr);
|
||||
}
|
||||
mtr.commit();
|
||||
}
|
||||
|
|
|
@ -421,14 +421,6 @@ dict_foreign_add_to_cache(
|
|||
dict_err_ignore_t ignore_err)
|
||||
/*!< in: error to be ignored */
|
||||
MY_ATTRIBUTE((nonnull(1), warn_unused_result));
|
||||
/*********************************************************************//**
|
||||
Checks if a table is referenced by foreign keys.
|
||||
@return TRUE if table is referenced by a foreign key */
|
||||
ibool
|
||||
dict_table_is_referenced_by_foreign_key(
|
||||
/*====================================*/
|
||||
const dict_table_t* table) /*!< in: InnoDB table */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
/**********************************************************************//**
|
||||
Replace the index passed in with another equivalent index in the
|
||||
foreign key lists of the table.
|
||||
|
@ -1326,7 +1318,7 @@ class dict_sys_t
|
|||
alignas(CPU_LEVEL1_DCACHE_LINESIZE) srw_lock latch;
|
||||
#ifdef UNIV_DEBUG
|
||||
/** whether latch is being held in exclusive mode (by any thread) */
|
||||
bool latch_ex;
|
||||
Atomic_relaxed<pthread_t> latch_ex;
|
||||
/** number of S-latch holders */
|
||||
Atomic_counter<uint32_t> latch_readers;
|
||||
#endif
|
||||
|
@ -1500,11 +1492,12 @@ public:
|
|||
/** @return whether any thread (not necessarily the current thread)
|
||||
is holding the latch; that is, this check may return false
|
||||
positives */
|
||||
bool frozen() const { return latch_readers || locked(); }
|
||||
bool frozen() const { return latch_readers || latch_ex; }
|
||||
/** @return whether any thread (not necessarily the current thread)
|
||||
is holding the exclusive latch; that is, this check may return false
|
||||
positives */
|
||||
bool locked() const { return latch_ex; }
|
||||
is holding a shared latch */
|
||||
bool frozen_not_locked() const { return latch_readers; }
|
||||
/** @return whether the current thread holds the exclusive latch */
|
||||
bool locked() const { return latch_ex == pthread_self(); }
|
||||
#endif
|
||||
private:
|
||||
/** Acquire the exclusive latch */
|
||||
|
@ -1523,7 +1516,7 @@ public:
|
|||
{
|
||||
ut_ad(!latch_readers);
|
||||
ut_ad(!latch_ex);
|
||||
ut_d(latch_ex= true);
|
||||
ut_d(latch_ex= pthread_self());
|
||||
}
|
||||
else
|
||||
lock_wait(SRW_LOCK_ARGS(file, line));
|
||||
|
@ -1540,9 +1533,9 @@ public:
|
|||
/** Unlock the data dictionary cache. */
|
||||
void unlock()
|
||||
{
|
||||
ut_ad(latch_ex);
|
||||
ut_ad(latch_ex == pthread_self());
|
||||
ut_ad(!latch_readers);
|
||||
ut_d(latch_ex= false);
|
||||
ut_d(latch_ex= 0);
|
||||
latch.wr_unlock();
|
||||
}
|
||||
/** Acquire a shared lock on the dictionary cache. */
|
||||
|
|
|
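The dict_sys_t debug-latch change above replaces a plain boolean latch_ex flag with the owning thread id, so locked() can assert that the current thread holds the exclusive latch rather than merely that someone does. A small sketch of that owner-recording pattern, using standard C++ types instead of the server's wrappers; illustration only.

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

class DebugLatch {
  std::mutex m;
  std::atomic<std::thread::id> owner{std::thread::id{}};  // default id == "not held"
public:
  void lock()   { m.lock(); owner.store(std::this_thread::get_id()); }
  void unlock() { owner.store(std::thread::id{}); m.unlock(); }
  // True only for the thread that actually holds the latch, unlike a
  // plain bool flag, which any thread would see as "locked".
  bool locked_by_me() const {
    return owner.load() == std::this_thread::get_id();
  }
};

int main() {
  DebugLatch l;
  l.lock();
  assert(l.locked_by_me());
  l.unlock();
  assert(!l.locked_by_me());
}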
@ -100,7 +100,7 @@ dict_load_foreigns(
|
|||
which must be loaded
|
||||
subsequently to load all the
|
||||
foreign key constraints. */
|
||||
MY_ATTRIBUTE((nonnull(1), warn_unused_result));
|
||||
MY_ATTRIBUTE((nonnull(1)));
|
||||
|
||||
/********************************************************************//**
|
||||
This function opens a system table, and return the first record.
|
||||
|
|
|
@ -264,7 +264,6 @@ ibuf_page_low(
|
|||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
|
||||
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
|
||||
Must not be called when recv_no_ibuf_operations==true.
|
||||
@param[in] page_id tablespace/page identifier
|
||||
|
@ -274,7 +273,7 @@ Must not be called when recv_no_ibuf_operations==true.
|
|||
# define ibuf_page(page_id, zip_size, mtr) \
|
||||
ibuf_page_low(page_id, zip_size, true, mtr)
|
||||
|
||||
#else /* UVIV_DEBUG */
|
||||
#else /* UNIV_DEBUG */
|
||||
|
||||
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
|
||||
Must not be called when recv_no_ibuf_operations==true.
|
||||
|
@ -285,7 +284,7 @@ Must not be called when recv_no_ibuf_operations==true.
|
|||
# define ibuf_page(page_id, zip_size, mtr) \
|
||||
ibuf_page_low(page_id, zip_size, mtr)
|
||||
|
||||
#endif /* UVIV_DEBUG */
|
||||
#endif /* UNIV_DEBUG */
|
||||
/***********************************************************************//**
|
||||
Frees excess pages from the ibuf free list. This function is called when an OS
|
||||
thread calls fsp services to allocate a new file segment, or a new page to a
|
||||
|
@ -334,7 +333,7 @@ dberr_t ibuf_merge_or_delete_for_page(buf_block_t *block,
|
|||
ulint zip_size);
|
||||
|
||||
/** Delete all change buffer entries for a tablespace,
|
||||
in DISCARD TABLESPACE, IMPORT TABLESPACE, or crash recovery.
|
||||
in DISCARD TABLESPACE, IMPORT TABLESPACE, or read-ahead.
|
||||
@param[in] space missing or to-be-discarded tablespace */
|
||||
void ibuf_delete_for_discarded_space(uint32_t space);
|
||||
|
||||
|
@ -385,13 +384,11 @@ ibuf_close(void);
|
|||
dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/** Updates free bits and buffered bits for bulk loaded page.
|
||||
@param[in] block index page
|
||||
@param]in] reset flag if reset free val */
|
||||
void
|
||||
ibuf_set_bitmap_for_bulk_load(
|
||||
buf_block_t* block,
|
||||
bool reset);
|
||||
/** Update free bits and buffered bits for bulk loaded page.
|
||||
@param block secondary index leaf page
|
||||
@param mtr mini-transaction
|
||||
@param reset whether the page is full */
|
||||
void ibuf_set_bitmap_for_bulk_load(buf_block_t *block, mtr_t *mtr, bool reset);
|
||||
|
||||
#define IBUF_HEADER_PAGE_NO FSP_IBUF_HEADER_PAGE_NO
|
||||
#define IBUF_TREE_ROOT_PAGE_NO FSP_IBUF_TREE_ROOT_PAGE_NO
|
||||
|
|
|
@ -794,17 +794,6 @@ page_rec_is_first(
const page_t* page) /*!< in: page */
MY_ATTRIBUTE((warn_unused_result));

/************************************************************//**
true if the record is the second user record on a page.
@return true if the second user record */
UNIV_INLINE
bool
page_rec_is_second(
/*===============*/
const rec_t* rec, /*!< in: record */
const page_t* page) /*!< in: page */
MY_ATTRIBUTE((warn_unused_result));

/************************************************************//**
true if the record is the last user record on a page.
@return true if the last user record */

@ -816,17 +805,6 @@ page_rec_is_last(
const page_t* page) /*!< in: page */
MY_ATTRIBUTE((warn_unused_result));

/************************************************************//**
true if the record is the second last user record on a page.
@return true if the second last user record */
UNIV_INLINE
bool
page_rec_is_second_last(
/*====================*/
const rec_t* rec, /*!< in: record */
const page_t* page) /*!< in: page */
MY_ATTRIBUTE((warn_unused_result));

/************************************************************//**
Returns the maximum combined size of records which can be inserted on top
of record heap.

@ -192,22 +192,6 @@ page_rec_is_first(
return(page_rec_get_next_const(page_get_infimum_rec(page)) == rec);
}

/************************************************************//**
true if the record is the second user record on a page.
@return true if the second user record */
UNIV_INLINE
bool
page_rec_is_second(
/*===============*/
const rec_t* rec, /*!< in: record */
const page_t* page) /*!< in: page */
{
ut_ad(page_get_n_recs(page) > 1);
if (const rec_t *first= page_rec_get_next_const(page_get_infimum_rec(page)))
return page_rec_get_next_const(first) == rec;
return false;
}

/************************************************************//**
true if the record is the last user record on a page.
@return true if the last user record */

@ -223,24 +207,6 @@ page_rec_is_last(
return(page_rec_get_next_const(rec) == page_get_supremum_rec(page));
}

/************************************************************//**
true if the record is the second last user record on a page.
@return true if the second last user record */
UNIV_INLINE
bool
page_rec_is_second_last(
/*====================*/
const rec_t* rec, /*!< in: record */
const page_t* page) /*!< in: page */
{
ut_ad(page_get_n_recs(page) > 1);
ut_ad(!page_rec_is_last(rec, page));

if (const rec_t *next= page_rec_get_next_const(rec))
return page_rec_is_supremum(page_rec_get_next_const(next));
return false;
}

/************************************************************//**
Returns the middle record of the records on the page. If there is an
even number of records in the list, returns the first record of the
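The reworked page_rec_is_second() and page_rec_is_second_last() above return false when page_rec_get_next_const() yields a null pointer instead of dereferencing it. A standalone sketch of the same null-propagating lookup on a toy singly linked list (illustrative only; node, is_second() and is_second_last() are not InnoDB names) might look like:

#include <cstddef>

// Illustrative sketch, not InnoDB code: if the "next" lookup can fail and
// return nullptr (as page_rec_get_next_const() can on a corrupted page),
// the predicate reports false instead of dereferencing a null pointer.
struct node { const node *next; };

// True if rec is the second node after head; false on a broken chain.
inline bool is_second(const node *head, const node *rec) {
  if (const node *first = head->next)
    return first->next == rec;
  return false;
}

// True if the node after rec is the last one before tail (the "supremum"
// stand-in here); false if the chain is broken.
inline bool is_second_last(const node *rec, const node *tail) {
  if (const node *next = rec->next)
    return next->next == tail;
  return false;
}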
@ -430,6 +430,10 @@ class trx_mod_table_time_t
/** First modification of a system versioned column
(NONE= no versioning, BULK= the table was dropped) */
undo_no_t first_versioned= NONE;
#ifdef UNIV_DEBUG
/** Whether the modified table is a FTS auxiliary table */
bool fts_aux_table= false;
#endif /* UNIV_DEBUG */

/** Buffer to store insert opertion */
row_merge_bulk_t *bulk_store= nullptr;

@ -496,6 +500,12 @@ public:
return false;
}

#ifdef UNIV_DEBUG
void set_aux_table() { fts_aux_table= true; }

bool is_aux_table() const { return fts_aux_table; }
#endif /* UNIV_DEBUG */

/** @return the first undo record that modified the table */
undo_no_t get_first() const
{

@ -3934,8 +3934,7 @@ void lock_release(trx_t *trx)
#ifdef UNIV_DEBUG
std::set<table_id_t> to_evict;
if (innodb_evict_tables_on_commit_debug &&
!trx->is_recovered && !trx->dict_operation &&
!trx->dict_operation_lock_mode)
!trx->is_recovered && !dict_sys.locked())
for (const auto& p : trx->mod_tables)
if (!p.first->is_temporary())
to_evict.emplace(p.first->id);

@ -1828,8 +1828,8 @@ ATTRIBUTE_COLD static dberr_t recv_log_recover_pre_10_2()
log_sys.set_flushed_lsn(lsn);
const lsn_t source_offset= log_sys.log.calc_lsn_offset_old(lsn);

static const char NO_UPGRADE_RECOVERY_MSG[]=
"Upgrade after a crash is not supported."
static constexpr char NO_UPGRADE_RECOVERY_MSG[]=
"InnoDB: Upgrade after a crash is not supported."
" This redo log was created before MariaDB 10.2.2";

recv_sys.read(source_offset & ~511, {buf, 512});

@ -1837,8 +1837,7 @@ ATTRIBUTE_COLD static dberr_t recv_log_recover_pre_10_2()
if (log_block_calc_checksum_format_0(buf) != log_block_get_checksum(buf) &&
!log_crypt_101_read_block(buf, lsn))
{
sql_print_error("InnoDB: %s, and it appears corrupted.",
NO_UPGRADE_RECOVERY_MSG);
sql_print_error("%s, and it appears corrupted.", NO_UPGRADE_RECOVERY_MSG);
return DB_CORRUPTION;
}

@ -1858,7 +1857,10 @@ ATTRIBUTE_COLD static dberr_t recv_log_recover_pre_10_2()
sql_print_error("InnoDB: Cannot decrypt log for upgrading."
" The encrypted log was created before MariaDB 10.2.2.");
else
sql_print_error("InnoDB: %s.", NO_UPGRADE_RECOVERY_MSG);
sql_print_error("%s. You must start up and shut down"
" MariaDB 10.1 or MySQL 5.6 or earlier"
" on the data directory.",
NO_UPGRADE_RECOVERY_MSG);

return DB_ERROR;
}

@ -1964,7 +1966,7 @@ recv_find_max_checkpoint(ulint* max_field)
if (log_sys.log.format != log_t::FORMAT_3_23
&& !recv_check_log_header_checksum(buf)) {
sql_print_error("InnoDB: Invalid redo log header checksum.");
return(DB_CORRUPTION);
return DB_CORRUPTION;
}

char creator[LOG_HEADER_CREATOR_END - LOG_HEADER_CREATOR + 1];

@ -1988,7 +1990,7 @@ recv_find_max_checkpoint(ulint* max_field)
default:
sql_print_error("InnoDB: Unsupported redo log format."
" The redo log was created with %s.", creator);
return(DB_ERROR);
return DB_ERROR;
}

for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2;

@ -2044,7 +2046,7 @@ recv_find_max_checkpoint(ulint* max_field)
" (corrupted redo log)."
" You can try --innodb-force-recovery=6"
" as a last resort.");
return(DB_ERROR);
return DB_ERROR;
}

switch (log_sys.log.format) {

@ -2053,12 +2055,15 @@ recv_find_max_checkpoint(ulint* max_field)
break;
default:
if (dberr_t err = recv_log_recover_10_4()) {
sql_print_error("InnoDB: Upgrade after a crash"
" is not supported."
sql_print_error("InnoDB: Upgrade after a crash "
"is not supported."
" The redo log was created with %s%s.",
creator,
(err == DB_ERROR
? "" : ", and it appears corrupted"));
err == DB_ERROR
? ". You must start up and shut down"
" MariaDB 10.4 or earlier"
" on the data directory"
: ", and it appears corrupted");
return err;
}
}
@ -1,14 +0,0 @@
{
global:
_maria_plugin_interface_version_;
_maria_sizeof_struct_st_plugin_;
_maria_plugin_declarations_;
my_snprintf_service;
thd_alloc_service;
thd_autoinc_service;
thd_error_context_service;
thd_kill_statement_service;
thd_wait_service;
local:
*;
};

@ -260,17 +260,10 @@ que_graph_free_recursive(
ind_node_t* cre_ind;
purge_node_t* purge;

DBUG_ENTER("que_graph_free_recursive");

if (node == NULL) {

DBUG_VOID_RETURN;
return;
}

DBUG_PRINT("que_graph_free_recursive",
("node: %p, type: " ULINTPF, node,
que_node_get_type(node)));

switch (que_node_get_type(node)) {

case QUE_NODE_FORK:

@ -410,8 +403,6 @@ que_graph_free_recursive(
default:
ut_error;
}

DBUG_VOID_RETURN;
}

/**********************************************************************//**
@ -507,66 +498,6 @@ que_node_get_containing_loop_node(
return(node);
}

#ifdef DBUG_TRACE
/** Gets information of an SQL query graph node.
@return type description */
static MY_ATTRIBUTE((warn_unused_result, nonnull))
const char*
que_node_type_string(
/*=================*/
const que_node_t* node) /*!< in: query graph node */
{
switch (que_node_get_type(node)) {
case QUE_NODE_SELECT:
return("SELECT");
case QUE_NODE_INSERT:
return("INSERT");
case QUE_NODE_UPDATE:
return("UPDATE");
case QUE_NODE_WHILE:
return("WHILE");
case QUE_NODE_ASSIGNMENT:
return("ASSIGNMENT");
case QUE_NODE_IF:
return("IF");
case QUE_NODE_FETCH:
return("FETCH");
case QUE_NODE_OPEN:
return("OPEN");
case QUE_NODE_PROC:
return("STORED PROCEDURE");
case QUE_NODE_FUNC:
return("FUNCTION");
case QUE_NODE_LOCK:
return("LOCK");
case QUE_NODE_THR:
return("QUERY THREAD");
case QUE_NODE_COMMIT:
return("COMMIT");
case QUE_NODE_UNDO:
return("UNDO ROW");
case QUE_NODE_PURGE:
return("PURGE ROW");
case QUE_NODE_ROLLBACK:
return("ROLLBACK");
case QUE_NODE_CREATE_TABLE:
return("CREATE TABLE");
case QUE_NODE_CREATE_INDEX:
return("CREATE INDEX");
case QUE_NODE_FOR:
return("FOR LOOP");
case QUE_NODE_RETURN:
return("RETURN");
case QUE_NODE_EXIT:
return("EXIT");
default:
ut_ad(0);
return("UNKNOWN NODE TYPE");
}
}
#endif /* DBUG_TRACE */


/**********************************************************************//**
Performs an execution step of an open or close cursor statement node.
@param thr query thread */

@ -614,10 +545,6 @@ que_thr_step(

old_thr = thr;

DBUG_PRINT("ib_que", ("Execute %u (%s) at %p",
unsigned(type), que_node_type_string(node),
(const void*) node));

if (type & QUE_NODE_CONTROL_STAT) {
if ((thr->prev_node != que_node_get_parent(node))
&& que_node_get_next(thr->prev_node)) {
@ -61,57 +61,49 @@ Completed by Sunny Bains and Marko Makela
/* Whether to disable file system cache */
char srv_disable_sort_file_cache;

/** Class that caches index row tuples made from a single cluster
/** Class that caches spatial index row tuples made from a single cluster
index page scan, and then insert into corresponding index tree */
class index_tuple_info_t {
class spatial_index_info {
public:
/** constructor
@param[in] heap memory heap
@param[in] index index to be created */
index_tuple_info_t(mem_heap_t* heap, dict_index_t* index) :
m_dtuple_vec(UT_NEW_NOKEY(idx_tuple_vec())),
m_index(index), m_heap(heap)
{ ut_ad(index->is_spatial()); }
/** constructor
@param index spatial index to be created */
spatial_index_info(dict_index_t *index) : index(index)
{
ut_ad(index->is_spatial());
}

/** destructor */
~index_tuple_info_t()
{
UT_DELETE(m_dtuple_vec);
}

/** Get the index object
@return the index object */
dict_index_t* get_index() UNIV_NOTHROW
{
return(m_index);
}

/** Caches an index row into index tuple vector
@param[in] row table row
@param[in] ext externally stored column
prefixes, or NULL */
void add(
const dtuple_t* row,
const row_ext_t* ext) UNIV_NOTHROW
{
dtuple_t* dtuple;

dtuple = row_build_index_entry(row, ext, m_index, m_heap);

ut_ad(dtuple);

m_dtuple_vec->push_back(dtuple);
}
/** Caches an index row into index tuple vector
@param[in] row table row
@param[in] ext externally stored column prefixes, or NULL */
void add(const dtuple_t *row, const row_ext_t *ext, mem_heap_t *heap)
{
dtuple_t *dtuple= row_build_index_entry(row, ext, index, heap);
ut_ad(dtuple);
ut_ad(dtuple->n_fields == index->n_fields);
if (ext)
{
/* Replace any references to ext, because ext will be allocated
from row_heap. */
for (ulint i= 1; i < dtuple->n_fields; i++)
{
dfield_t &dfield= dtuple->fields[i];
if (dfield.data >= ext->buf &&
dfield.data <= &ext->buf[ext->n_ext * ext->max_len])
dfield_dup(&dfield, heap);
}
}
m_dtuple_vec.push_back(dtuple);
}

/** Insert spatial index rows cached in vector into spatial index
@param[in] trx_id transaction id
@param[in,out] row_heap memory heap
@param[in] pcur cluster index scanning cursor
@param[in,out] mtr_started whether scan_mtr is active
@param[in,out] heap temporary memory heap
@param[in,out] scan_mtr mini-transaction for pcur
@return DB_SUCCESS if successful, else error number */
dberr_t insert(trx_id_t trx_id, mem_heap_t* row_heap, btr_pcur_t* pcur,
bool& mtr_started, mtr_t* scan_mtr) const
dberr_t insert(trx_id_t trx_id, btr_pcur_t* pcur,
bool& mtr_started, mem_heap_t* heap, mtr_t* scan_mtr)
{
big_rec_t* big_rec;
rec_t* rec;
@ -130,8 +122,8 @@ public:
DBUG_EXECUTE_IF("row_merge_instrument_log_check_flush",
log_sys.set_check_flush_or_checkpoint(););

for (idx_tuple_vec::iterator it = m_dtuple_vec->begin();
it != m_dtuple_vec->end();
for (idx_tuple_vec::iterator it = m_dtuple_vec.begin();
it != m_dtuple_vec.end();
++it) {
dtuple = *it;
ut_ad(dtuple);

@ -151,29 +143,29 @@ public:
}

mtr.start();
m_index->set_modified(mtr);
index->set_modified(mtr);

ins_cur.index = m_index;
rtr_init_rtr_info(&rtr_info, false, &ins_cur, m_index,
ins_cur.index = index;
rtr_init_rtr_info(&rtr_info, false, &ins_cur, index,
false);
rtr_info_update_btr(&ins_cur, &rtr_info);

error = btr_cur_search_to_nth_level(
m_index, 0, dtuple, PAGE_CUR_RTREE_INSERT,
index, 0, dtuple, PAGE_CUR_RTREE_INSERT,
BTR_MODIFY_LEAF, &ins_cur, &mtr);

/* It need to update MBR in parent entry,
so change search mode to BTR_MODIFY_TREE */
if (error == DB_SUCCESS && rtr_info.mbr_adj) {
mtr_commit(&mtr);
mtr.commit();
rtr_clean_rtr_info(&rtr_info, true);
rtr_init_rtr_info(&rtr_info, false, &ins_cur,
m_index, false);
index, false);
rtr_info_update_btr(&ins_cur, &rtr_info);
mtr_start(&mtr);
m_index->set_modified(mtr);
mtr.start();
index->set_modified(mtr);
error = btr_cur_search_to_nth_level(
m_index, 0, dtuple,
index, 0, dtuple,
PAGE_CUR_RTREE_INSERT,
BTR_MODIFY_TREE, &ins_cur, &mtr);
}

@ -181,7 +173,7 @@ public:
if (error == DB_SUCCESS) {
error = btr_cur_optimistic_insert(
flag, &ins_cur, &ins_offsets,
&row_heap, dtuple, &rec, &big_rec,
&heap, dtuple, &rec, &big_rec,
0, NULL, &mtr);
}

@ -190,15 +182,15 @@ public:
if (error == DB_FAIL) {
mtr.commit();
mtr.start();
m_index->set_modified(mtr);
index->set_modified(mtr);

rtr_clean_rtr_info(&rtr_info, true);
rtr_init_rtr_info(&rtr_info, false,
&ins_cur, m_index, false);
&ins_cur, index, false);

rtr_info_update_btr(&ins_cur, &rtr_info);
error = btr_cur_search_to_nth_level(
m_index, 0, dtuple,
index, 0, dtuple,
PAGE_CUR_RTREE_INSERT,
BTR_MODIFY_TREE,
&ins_cur, &mtr);

@ -206,7 +198,7 @@ public:
if (error == DB_SUCCESS) {
error = btr_cur_pessimistic_insert(
flag, &ins_cur, &ins_offsets,
&row_heap, dtuple, &rec,
&heap, dtuple, &rec,
&big_rec, 0, NULL, &mtr);
}
}

@ -232,30 +224,26 @@ public:
}
}

mtr_commit(&mtr);
mtr.commit();

rtr_clean_rtr_info(&rtr_info, true);
}

m_dtuple_vec->clear();
m_dtuple_vec.clear();

return(error);
}

private:
/** Cache index rows made from a cluster index scan. Usually
for rows on single cluster index page */
typedef std::vector<dtuple_t*, ut_allocator<dtuple_t*> >
idx_tuple_vec;
/** Cache index rows made from a cluster index scan. Usually
for rows on single cluster index page */
typedef std::vector<dtuple_t*, ut_allocator<dtuple_t*> > idx_tuple_vec;

/** vector used to cache index rows made from cluster index scan */
idx_tuple_vec* const m_dtuple_vec;

/** the index being built */
dict_index_t* const m_index;

/** memory heap for creating index tuples */
mem_heap_t* const m_heap;
/** vector used to cache index rows made from cluster index scan */
idx_tuple_vec m_dtuple_vec;
public:
/** the index being built */
dict_index_t*const index;
};

/* Maximum pending doc memory limit in bytes for a fts tokenization thread */
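In the rewritten add() above, any dfield whose data pointer falls inside ext->buf is duplicated into the supplied heap, because ext is allocated from row_heap, which is emptied before the cached tuples are inserted. A standalone sketch of that pattern, caching rows and deep-copying only the fields that point into a scratch buffer about to be recycled (illustrative only; field, cached_row and detach_from_scratch() are not names from this commit), might look like:

#include <cstddef>
#include <deque>
#include <string>
#include <vector>

// Illustrative sketch, not row0merge.cc code: before a scratch buffer is
// recycled, copy every cached field that still points into it, mirroring
// the dfield_dup() calls in spatial_index_info::add().
struct field { const char *data; std::size_t len; };

struct cached_row {
  std::vector<field> fields;
  std::deque<std::string> copies;  // owned storage; plays the role of the heap
};

void detach_from_scratch(cached_row &row,
                         const char *scratch, std::size_t scratch_len) {
  for (field &f : row.fields)
    if (f.data >= scratch && f.data < scratch + scratch_len) {
      row.copies.emplace_back(f.data, f.len);  // duplicate the payload
      f.data = row.copies.back().data();       // re-point at the owned copy
    }
}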
@ -1749,8 +1737,7 @@ row_mtuple_cmp(
@param[in] trx_id transaction id
@param[in] sp_tuples cached spatial rows
@param[in] num_spatial number of spatial indexes
@param[in,out] heap heap for insert
@param[in,out] sp_heap heap for tuples
@param[in,out] heap temporary memory heap
@param[in,out] pcur cluster index cursor
@param[in,out] started whether mtr is active
@param[in,out] mtr mini-transaction

@ -1759,10 +1746,9 @@ static
dberr_t
row_merge_spatial_rows(
trx_id_t trx_id,
index_tuple_info_t** sp_tuples,
spatial_index_info** sp_tuples,
ulint num_spatial,
mem_heap_t* heap,
mem_heap_t* sp_heap,
btr_pcur_t* pcur,
bool& started,
mtr_t* mtr)

@ -1771,10 +1757,10 @@ row_merge_spatial_rows(
return DB_SUCCESS;

for (ulint j= 0; j < num_spatial; j++)
if (dberr_t err= sp_tuples[j]->insert(trx_id, heap, pcur, started, mtr))
if (dberr_t err= sp_tuples[j]->insert(trx_id, pcur, started, heap, mtr))
return err;

mem_heap_empty(sp_heap);
mem_heap_empty(heap);
return DB_SUCCESS;
}

@ -1890,8 +1876,7 @@ row_merge_read_clustered_index(
doc_id_t max_doc_id = 0;
ibool add_doc_id = FALSE;
pthread_cond_t* fts_parallel_sort_cond = nullptr;
index_tuple_info_t** sp_tuples = NULL;
mem_heap_t* sp_heap = NULL;
spatial_index_info** sp_tuples = nullptr;
ulint num_spatial = 0;
BtrBulk* clust_btr_bulk = NULL;
bool clust_temp_file = false;

@ -1980,9 +1965,7 @@ row_merge_read_clustered_index(
if (num_spatial > 0) {
ulint count = 0;

sp_heap = mem_heap_create(512);

sp_tuples = static_cast<index_tuple_info_t**>(
sp_tuples = static_cast<spatial_index_info**>(
ut_malloc_nokey(num_spatial
* sizeof(*sp_tuples)));

@ -1990,9 +1973,7 @@ row_merge_read_clustered_index(
if (dict_index_is_spatial(index[i])) {
sp_tuples[count]
= UT_NEW_NOKEY(
index_tuple_info_t(
sp_heap,
index[i]));
spatial_index_info(index[i]));
count++;
}
}

@ -2167,7 +2148,7 @@ corrupted_rec:
/* Insert the cached spatial index rows. */
err = row_merge_spatial_rows(
trx->id, sp_tuples, num_spatial,
row_heap, sp_heap, &pcur, mtr_started, &mtr);
row_heap, &pcur, mtr_started, &mtr);

if (err != DB_SUCCESS) {
goto func_exit;

@ -2561,7 +2542,7 @@ write_buffers:
continue;
}

ut_ad(sp_tuples[s_idx_cnt]->get_index()
ut_ad(sp_tuples[s_idx_cnt]->index
== buf->index);

/* If the geometry field is invalid, report

@ -2571,7 +2552,7 @@ write_buffers:
break;
}

sp_tuples[s_idx_cnt]->add(row, ext);
sp_tuples[s_idx_cnt]->add(row, ext, buf->heap);
s_idx_cnt++;

continue;

@ -2693,7 +2674,7 @@ write_buffers:
err = row_merge_spatial_rows(
trx->id, sp_tuples,
num_spatial,
row_heap, sp_heap,
row_heap,
&pcur, mtr_started,
&mtr);

@ -3059,10 +3040,6 @@ wait_again:
UT_DELETE(sp_tuples[i]);
}
ut_free(sp_tuples);

if (sp_heap) {
mem_heap_free(sp_heap);
}
}

/* Update the next Doc ID we used. Table should be locked, so