mirror of
https://github.com/MariaDB/server.git
synced 2025-01-16 12:02:42 +01:00
Merge 10.2 into bb-10.2-ext
This commit is contained in:
commit
ce07676502
37 changed files with 1073 additions and 850 deletions
|
@ -192,7 +192,7 @@ static my_long_addr_t read_addr(char** buf)
|
|||
while((c = hex_val(*p++)) != HEX_INVALID)
|
||||
addr = (addr << 4) + c;
|
||||
|
||||
*buf = p;
|
||||
*buf= p-1;
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -203,6 +203,7 @@ static int init_sym_entry(SYM_ENTRY* se, char* buf)
|
|||
|
||||
if (!se->addr)
|
||||
return -1;
|
||||
buf++;
|
||||
while (my_isspace(&my_charset_latin1,*buf++))
|
||||
/* empty */;
|
||||
|
||||
|
@ -281,32 +282,47 @@ static SYM_ENTRY* resolve_addr(uchar* addr, SYM_ENTRY* se)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
Resolve anything that starts with [0x or (+0x or start of line and 0x
|
||||
Skip '_end' as this is an indication of a wrong symbol (stack?)
|
||||
*/
|
||||
|
||||
static void do_resolve()
|
||||
{
|
||||
char buf[1024], *p;
|
||||
while (fgets(buf, sizeof(buf), fp_dump))
|
||||
{
|
||||
/* skip bracket */
|
||||
p= (p= strchr(buf, '[')) ? p+1 : buf;
|
||||
/* skip space */
|
||||
while (my_isspace(&my_charset_latin1,*p))
|
||||
++p;
|
||||
|
||||
if (*p++ == '0' && *p++ == 'x')
|
||||
for (p= buf ; *p ; p++)
|
||||
{
|
||||
SYM_ENTRY se ;
|
||||
uchar* addr = (uchar*)read_addr(&p);
|
||||
if (resolve_addr(addr, &se))
|
||||
fprintf(fp_out, "%p %s + %d\n", addr, se.symbol,
|
||||
(int) (addr - se.addr));
|
||||
int found= 0;
|
||||
if (p[0] == '[' && p[1] == '0' && p[2] == 'x')
|
||||
found= 3;
|
||||
if (p[0] == '(' && p[1] == '+' && p[2] == '0' && p[3] == 'x')
|
||||
found= 4;
|
||||
|
||||
/* For stdin */
|
||||
if (p == buf && p[0] == '0' && p[1] == 'x')
|
||||
found= 2;
|
||||
|
||||
if (found)
|
||||
{
|
||||
SYM_ENTRY se ;
|
||||
uchar *addr;
|
||||
char *tmp= p + found;
|
||||
addr= (uchar*)read_addr(&tmp);
|
||||
if (resolve_addr(addr, &se) && strcmp(se.symbol, "_end"))
|
||||
{
|
||||
fprintf(fp_out, "%c%p %s + %d", *p, addr, se.symbol,
|
||||
(int) (addr - se.addr));
|
||||
p= tmp-1;
|
||||
}
|
||||
else
|
||||
{
|
||||
fputc(*p, stdout);
|
||||
}
|
||||
}
|
||||
else
|
||||
fprintf(fp_out, "%p (?)\n", addr);
|
||||
|
||||
}
|
||||
else
|
||||
{
|
||||
fputs(buf, fp_out);
|
||||
continue;
|
||||
fputc(*p, stdout);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -65,6 +65,8 @@ flush logs;
|
|||
# To make it predictable, we do a useless update now, but which has the
|
||||
# interest of making the slave catch both rotate events.
|
||||
|
||||
let $skip_checkpoint_events=1;
|
||||
|
||||
eval create table t3 (a int)ENGINE=$engine_type;
|
||||
|
||||
# Sync slave and force it to start on another binary log
|
||||
|
|
|
@ -53,6 +53,9 @@
|
|||
#
|
||||
# $rpl_debug
|
||||
# If set, verbose debug info is printed.
|
||||
#
|
||||
# $filter_script
|
||||
# If set, rows matching this regexp will be filtered out
|
||||
|
||||
--let $include_filename= filter_file.inc
|
||||
--source include/begin_include_file.inc
|
||||
|
@ -67,10 +70,12 @@ if ($rpl_debug)
|
|||
|
||||
--let _FF_PRE_SCRIPT= $pre_script
|
||||
--let _FF_SCRIPT= $script
|
||||
--let _FF_FILTER_SCRIPT= $filter_script
|
||||
--let _FF_INPUT_FILE= $input_file
|
||||
--let _FF_OUTPUT_FILE= $output_file
|
||||
--let _FF_SELECT_COLUMNS= $select_columns
|
||||
--let _FF_DEBUG= $rpl_debug
|
||||
|
||||
if (!$output_file)
|
||||
{
|
||||
--let _FF_OUTPUT_FILE= $input_file
|
||||
|
@ -79,6 +84,7 @@ perl;
|
|||
my $pre_script = $ENV{'_FF_PRE_SCRIPT'};
|
||||
$pre_script =~ s/DOLLAR/\$/g;
|
||||
my $script = $ENV{'_FF_SCRIPT'};
|
||||
my $filter_script = $ENV{'_FF_FILTER_SCRIPT'};
|
||||
$script =~ s/DOLLAR/\$/g;
|
||||
my $input_file = $ENV{'_FF_INPUT_FILE'};
|
||||
my $output_file = $ENV{'_FF_OUTPUT_FILE'};
|
||||
|
@ -123,7 +129,10 @@ perl;
|
|||
{
|
||||
' . $script . '
|
||||
}
|
||||
$filtered_contents .= $_."\n";
|
||||
if (!$filter_script || ! m/$filter_script/)
|
||||
{
|
||||
$filtered_contents .= $_."\n";
|
||||
}
|
||||
}
|
||||
close FILE or die "Error closing $input_file: $!";
|
||||
open FILE, "> $output_file" or die "Error opening $output_file: $!";
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
# [--let $binlog_file= [<FILENAME> | LAST]]
|
||||
# [--let $binlog_start= <POSITION> ]
|
||||
# [--let $binlog_limit= 1, 3 ]
|
||||
# [--let $keep_gtid_events= 1]
|
||||
# [--let $skip_checkpoint_events= 1]
|
||||
# --source include/show_binlog_events.inc
|
||||
#
|
||||
# Parameters:
|
||||
|
@ -26,12 +26,6 @@
|
|||
# Limit for the 'LIMIT' clause of SHOW BINLOG EVENTS, i.e.:
|
||||
# $binlog_limit= 3 -- print three events
|
||||
# $binlog_limit= 4, 3 -- skip four events, print the three next events
|
||||
#
|
||||
# $keep_gtid_events
|
||||
# By default, Gtid_log_event and Previous_gtid_log_event are
|
||||
# filtered out, so that the output is independent of whether GTIDs
|
||||
# are enabled or not. If this flag is set, events are kept but
|
||||
# the actual GTID values are masked out.
|
||||
|
||||
--let $include_filename= show_binlog_events.inc
|
||||
--source include/begin_include_file.inc
|
||||
|
|
|
@ -104,8 +104,15 @@ let $script=
|
|||
s{DOLLARmysqltest_vardir}{MYSQLTEST_VARDIR}g;
|
||||
||
|
||||
--let $pre_script= my DOLLARmysqltest_vardir = DOLLARENV{'MYSQLTEST_VARDIR'};
|
||||
|
||||
--delimiter ;
|
||||
|
||||
if ($skip_checkpoint_events)
|
||||
{
|
||||
let $filter_script=Binlog_checkpoint;
|
||||
}
|
||||
|
||||
|
||||
#--let $select_columns= 1 3 6
|
||||
--let $input_file= $output_file
|
||||
--source include/filter_file.inc
|
||||
|
|
127
mysql-test/suite/gcol/r/innodb_virtual_stats.result
Normal file
127
mysql-test/suite/gcol/r/innodb_virtual_stats.result
Normal file
|
@ -0,0 +1,127 @@
|
|||
CREATE TABLE t (
|
||||
a INT,
|
||||
b INT,
|
||||
c INT GENERATED ALWAYS AS(a+b),
|
||||
d INT GENERATED ALWAYS AS(a+b+b),
|
||||
KEY idxa (a),
|
||||
KEY vidxcd (c, d)
|
||||
) ENGINE=INNODB STATS_PERSISTENT=1 STATS_AUTO_RECALC=1;
|
||||
INSERT INTO t (a,b) VALUES (1, 2);
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
index_name stat_name stat_description
|
||||
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
|
||||
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
|
||||
GEN_CLUST_INDEX size Number of pages in the index
|
||||
idxa n_diff_pfx01 a
|
||||
idxa n_diff_pfx02 a,DB_ROW_ID
|
||||
idxa n_leaf_pages Number of leaf pages in the index
|
||||
idxa size Number of pages in the index
|
||||
vidxcd n_diff_pfx01 c
|
||||
vidxcd n_diff_pfx02 c,d
|
||||
vidxcd n_diff_pfx03 c,d,DB_ROW_ID
|
||||
vidxcd n_leaf_pages Number of leaf pages in the index
|
||||
vidxcd size Number of pages in the index
|
||||
ALTER TABLE t ADD COLUMN e INT GENERATED ALWAYS AS(a+a+b), ADD INDEX idxb (b), ALGORITHM=INPLACE;
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
index_name stat_name stat_description
|
||||
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
|
||||
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
|
||||
GEN_CLUST_INDEX size Number of pages in the index
|
||||
idxa n_diff_pfx01 a
|
||||
idxa n_diff_pfx02 a,DB_ROW_ID
|
||||
idxa n_leaf_pages Number of leaf pages in the index
|
||||
idxa size Number of pages in the index
|
||||
idxb n_diff_pfx01 b
|
||||
idxb n_diff_pfx02 b,DB_ROW_ID
|
||||
idxb n_leaf_pages Number of leaf pages in the index
|
||||
idxb size Number of pages in the index
|
||||
vidxcd n_diff_pfx01 c
|
||||
vidxcd n_diff_pfx02 c,d
|
||||
vidxcd n_diff_pfx03 c,d,DB_ROW_ID
|
||||
vidxcd n_leaf_pages Number of leaf pages in the index
|
||||
vidxcd size Number of pages in the index
|
||||
ALTER TABLE t DROP COLUMN c, DROP INDEX idxa, ALGORITHM=INPLACE;
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
index_name stat_name stat_description
|
||||
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
|
||||
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
|
||||
GEN_CLUST_INDEX size Number of pages in the index
|
||||
idxb n_diff_pfx01 b
|
||||
idxb n_diff_pfx02 b,DB_ROW_ID
|
||||
idxb n_leaf_pages Number of leaf pages in the index
|
||||
idxb size Number of pages in the index
|
||||
vidxcd n_diff_pfx01 d
|
||||
vidxcd n_diff_pfx02 d,DB_ROW_ID
|
||||
vidxcd n_leaf_pages Number of leaf pages in the index
|
||||
vidxcd size Number of pages in the index
|
||||
ALTER TABLE t ADD INDEX vidxe (e), ALGORITHM=INPLACE;
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
index_name stat_name stat_description
|
||||
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
|
||||
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
|
||||
GEN_CLUST_INDEX size Number of pages in the index
|
||||
idxb n_diff_pfx01 b
|
||||
idxb n_diff_pfx02 b,DB_ROW_ID
|
||||
idxb n_leaf_pages Number of leaf pages in the index
|
||||
idxb size Number of pages in the index
|
||||
vidxcd n_diff_pfx01 d
|
||||
vidxcd n_diff_pfx02 d,DB_ROW_ID
|
||||
vidxcd n_leaf_pages Number of leaf pages in the index
|
||||
vidxcd size Number of pages in the index
|
||||
vidxe n_diff_pfx01 e
|
||||
vidxe n_diff_pfx02 e,DB_ROW_ID
|
||||
vidxe n_leaf_pages Number of leaf pages in the index
|
||||
vidxe size Number of pages in the index
|
||||
ALTER TABLE t ADD COLUMN f INT GENERATED ALWAYS AS(a + a), ADD INDEX vidxf (f), ALGORITHM=INPLACE;
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
index_name stat_name stat_description
|
||||
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
|
||||
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
|
||||
GEN_CLUST_INDEX size Number of pages in the index
|
||||
idxb n_diff_pfx01 b
|
||||
idxb n_diff_pfx02 b,DB_ROW_ID
|
||||
idxb n_leaf_pages Number of leaf pages in the index
|
||||
idxb size Number of pages in the index
|
||||
vidxcd n_diff_pfx01 d
|
||||
vidxcd n_diff_pfx02 d,DB_ROW_ID
|
||||
vidxcd n_leaf_pages Number of leaf pages in the index
|
||||
vidxcd size Number of pages in the index
|
||||
vidxe n_diff_pfx01 e
|
||||
vidxe n_diff_pfx02 e,DB_ROW_ID
|
||||
vidxe n_leaf_pages Number of leaf pages in the index
|
||||
vidxe size Number of pages in the index
|
||||
vidxf n_diff_pfx01 f
|
||||
vidxf n_diff_pfx02 f,DB_ROW_ID
|
||||
vidxf n_leaf_pages Number of leaf pages in the index
|
||||
vidxf size Number of pages in the index
|
||||
ALTER TABLE t DROP INDEX vidxcd;
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
index_name stat_name stat_description
|
||||
GEN_CLUST_INDEX n_diff_pfx01 DB_ROW_ID
|
||||
GEN_CLUST_INDEX n_leaf_pages Number of leaf pages in the index
|
||||
GEN_CLUST_INDEX size Number of pages in the index
|
||||
idxb n_diff_pfx01 b
|
||||
idxb n_diff_pfx02 b,DB_ROW_ID
|
||||
idxb n_leaf_pages Number of leaf pages in the index
|
||||
idxb size Number of pages in the index
|
||||
vidxe n_diff_pfx01 e
|
||||
vidxe n_diff_pfx02 e,DB_ROW_ID
|
||||
vidxe n_leaf_pages Number of leaf pages in the index
|
||||
vidxe size Number of pages in the index
|
||||
vidxf n_diff_pfx01 f
|
||||
vidxf n_diff_pfx02 f,DB_ROW_ID
|
||||
vidxf n_leaf_pages Number of leaf pages in the index
|
||||
vidxf size Number of pages in the index
|
||||
DROP TABLE t;
|
52
mysql-test/suite/gcol/t/innodb_virtual_stats.test
Normal file
52
mysql-test/suite/gcol/t/innodb_virtual_stats.test
Normal file
|
@ -0,0 +1,52 @@
|
|||
--source include/have_innodb.inc
|
||||
|
||||
#
|
||||
# BUG#22469660 INNODB DOESN'T UPDATE INDEX STATS WHEN ADDING OR DROPPING VIRTUAL COLUMN
|
||||
#
|
||||
|
||||
CREATE TABLE t (
|
||||
a INT,
|
||||
b INT,
|
||||
c INT GENERATED ALWAYS AS(a+b),
|
||||
d INT GENERATED ALWAYS AS(a+b+b),
|
||||
KEY idxa (a),
|
||||
KEY vidxcd (c, d)
|
||||
) ENGINE=INNODB STATS_PERSISTENT=1 STATS_AUTO_RECALC=1;
|
||||
|
||||
INSERT INTO t (a,b) VALUES (1, 2);
|
||||
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
|
||||
ALTER TABLE t ADD COLUMN e INT GENERATED ALWAYS AS(a+a+b), ADD INDEX idxb (b), ALGORITHM=INPLACE;
|
||||
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
|
||||
ALTER TABLE t DROP COLUMN c, DROP INDEX idxa, ALGORITHM=INPLACE;
|
||||
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
|
||||
ALTER TABLE t ADD INDEX vidxe (e), ALGORITHM=INPLACE;
|
||||
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
|
||||
ALTER TABLE t ADD COLUMN f INT GENERATED ALWAYS AS(a + a), ADD INDEX vidxf (f), ALGORITHM=INPLACE;
|
||||
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
|
||||
ALTER TABLE t DROP INDEX vidxcd;
|
||||
|
||||
SELECT index_name, stat_name, stat_description
|
||||
FROM mysql.innodb_index_stats
|
||||
WHERE database_name = 'test' AND table_name = 't';
|
||||
|
||||
DROP TABLE t;
|
12
mysql-test/suite/innodb/r/innodb_stats_debug.result
Normal file
12
mysql-test/suite/innodb/r/innodb_stats_debug.result
Normal file
|
@ -0,0 +1,12 @@
|
|||
call mtr.add_suppression("InnoDB: Cannot save (table|index) statistics for table `test`\\.`t1`.*: Persistent statistics do not exist");
|
||||
CREATE TABLE t1 (a INT, KEY(a)) ENGINE=INNODB STATS_PERSISTENT=1;
|
||||
SET @save_debug= @@SESSION.debug_dbug;
|
||||
SET debug_dbug= '+d,stats_index_error';
|
||||
ANALYZE TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Operation failed
|
||||
SET debug_dbug= @save_debug;
|
||||
ANALYZE TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
DROP TABLE t1;
|
|
@ -1,6 +1,5 @@
|
|||
call mtr.add_suppression("InnoDB: Cannot save table statistics for table `test`\\.`t1`: Persistent statistics do not exist");
|
||||
SET GLOBAL innodb_stats_persistent= ON;
|
||||
CREATE TABLE t1 (t TEXT) ENGINE=InnoDB;
|
||||
CREATE TABLE t1 (t TEXT) ENGINE=InnoDB STATS_PERSISTENT=1;
|
||||
connect con1,localhost,root,,test;
|
||||
SET DEBUG_SYNC='ib_trunc_table_trunc_completing SIGNAL committed WAIT_FOR ever';
|
||||
TRUNCATE TABLE t1;
|
||||
|
|
13
mysql-test/suite/innodb/t/innodb_stats_debug.test
Normal file
13
mysql-test/suite/innodb/t/innodb_stats_debug.test
Normal file
|
@ -0,0 +1,13 @@
|
|||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
|
||||
call mtr.add_suppression("InnoDB: Cannot save (table|index) statistics for table `test`\\.`t1`.*: Persistent statistics do not exist");
|
||||
|
||||
CREATE TABLE t1 (a INT, KEY(a)) ENGINE=INNODB STATS_PERSISTENT=1;
|
||||
SET @save_debug= @@SESSION.debug_dbug;
|
||||
SET debug_dbug= '+d,stats_index_error';
|
||||
ANALYZE TABLE t1;
|
||||
SET debug_dbug= @save_debug;
|
||||
ANALYZE TABLE t1;
|
||||
|
||||
DROP TABLE t1;
|
|
@ -57,5 +57,5 @@ SELECT table_name FROM mysql.innodb_index_stats
|
|||
WHERE table_name='innodb_stats_drop_locked';
|
||||
|
||||
--disable_query_log
|
||||
call mtr.add_suppression("Unable to delete statistics for table test.innodb_stats_drop_locked: Lock wait timeout. They can be deleted later using DELETE FROM mysql.innodb_index_stats WHERE database_name");
|
||||
call mtr.add_suppression("Unable to delete statistics for table test\\.innodb_stats_drop_locked: Lock wait");
|
||||
--enable_query_log
|
||||
|
|
|
@ -2,10 +2,8 @@
|
|||
--source include/have_debug.inc
|
||||
--source include/have_debug_sync.inc
|
||||
|
||||
call mtr.add_suppression("InnoDB: Cannot save table statistics for table `test`\\.`t1`: Persistent statistics do not exist");
|
||||
|
||||
SET GLOBAL innodb_stats_persistent= ON;
|
||||
CREATE TABLE t1 (t TEXT) ENGINE=InnoDB;
|
||||
CREATE TABLE t1 (t TEXT) ENGINE=InnoDB STATS_PERSISTENT=1;
|
||||
--connect (con1,localhost,root,,test)
|
||||
SET DEBUG_SYNC='ib_trunc_table_trunc_completing SIGNAL committed WAIT_FOR ever';
|
||||
--send
|
||||
|
|
|
@ -226,7 +226,6 @@ master-bin.000001 # Query # # COMMIT
|
|||
master-bin.000001 # Rotate # # master-bin.000002;pos=POS
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000002 # Binlog_checkpoint # # master-bin.000002
|
||||
master-bin.000002 # Gtid # # GTID #-#-#
|
||||
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=MyISAM
|
||||
master-bin.000002 # Gtid # # GTID #-#-#
|
||||
|
@ -268,7 +267,6 @@ slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=MyISAM
|
|||
slave-bin.000001 # Rotate # # slave-bin.000002;pos=POS
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
|
||||
slave-bin.000002 # Gtid # # GTID #-#-#
|
||||
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=MyISAM
|
||||
slave-bin.000002 # Gtid # # BEGIN GTID #-#-#
|
||||
|
|
|
@ -226,7 +226,6 @@ master-bin.000001 # Xid # # COMMIT /* XID */
|
|||
master-bin.000001 # Rotate # # master-bin.000002;pos=POS
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000002 # Binlog_checkpoint # # master-bin.000002
|
||||
master-bin.000002 # Gtid # # GTID #-#-#
|
||||
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=InnoDB
|
||||
master-bin.000002 # Gtid # # GTID #-#-#
|
||||
|
@ -268,7 +267,6 @@ slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=InnoDB
|
|||
slave-bin.000001 # Rotate # # slave-bin.000002;pos=POS
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
|
||||
slave-bin.000002 # Gtid # # GTID #-#-#
|
||||
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=InnoDB
|
||||
slave-bin.000002 # Gtid # # BEGIN GTID #-#-#
|
||||
|
|
|
@ -222,7 +222,6 @@ master-bin.000001 # Query # # COMMIT
|
|||
master-bin.000001 # Rotate # # master-bin.000002;pos=POS
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000002 # Binlog_checkpoint # # master-bin.000002
|
||||
master-bin.000002 # Gtid # # GTID #-#-#
|
||||
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=MyISAM
|
||||
master-bin.000002 # Gtid # # GTID #-#-#
|
||||
|
@ -260,7 +259,6 @@ slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=MyISAM
|
|||
slave-bin.000001 # Rotate # # slave-bin.000002;pos=POS
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
|
||||
slave-bin.000002 # Gtid # # GTID #-#-#
|
||||
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=MyISAM
|
||||
slave-bin.000002 # Gtid # # BEGIN GTID #-#-#
|
||||
|
|
|
@ -751,6 +751,8 @@ DECLARE_THREAD(btr_defragment_thread)(void*)
|
|||
buf_block_t* first_block;
|
||||
buf_block_t* last_block;
|
||||
|
||||
trx_t* trx = trx_allocate_for_background();
|
||||
|
||||
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
|
||||
ut_ad(btr_defragment_thread_active);
|
||||
|
||||
|
@ -826,31 +828,35 @@ DECLARE_THREAD(btr_defragment_thread)(void*)
|
|||
/* Update the last_processed time of this index. */
|
||||
item->last_processed = now;
|
||||
} else {
|
||||
dberr_t err = DB_SUCCESS;
|
||||
mtr_commit(&mtr);
|
||||
/* Reaching the end of the index. */
|
||||
dict_stats_empty_defrag_stats(index);
|
||||
err = dict_stats_save_defrag_stats(index);
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::error() << "Saving defragmentation stats for table "
|
||||
<< index->table->name.m_name
|
||||
<< " index " << index->name()
|
||||
<< " failed with error " << err;
|
||||
} else {
|
||||
err = dict_stats_save_defrag_summary(index);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::error() << "Saving defragmentation summary for table "
|
||||
<< index->table->name.m_name
|
||||
<< " index " << index->name()
|
||||
<< " failed with error " << err;
|
||||
}
|
||||
ut_d(trx->persistent_stats = true);
|
||||
++trx->will_lock;
|
||||
dberr_t err = dict_stats_save_defrag_stats(index, trx);
|
||||
if (err == DB_SUCCESS) {
|
||||
err = dict_stats_save_defrag_summary(
|
||||
index, trx);
|
||||
}
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
trx_rollback_to_savepoint(trx, NULL);
|
||||
ib::error() << "Saving defragmentation stats for table "
|
||||
<< index->table->name
|
||||
<< " index " << index->name
|
||||
<< " failed with error "
|
||||
<< ut_strerr(err);
|
||||
} else if (trx->state != TRX_STATE_NOT_STARTED) {
|
||||
trx_commit_for_mysql(trx);
|
||||
}
|
||||
|
||||
ut_d(trx->persistent_stats = false);
|
||||
btr_defragment_remove_item(item);
|
||||
}
|
||||
}
|
||||
|
||||
trx_free_for_background(trx);
|
||||
|
||||
btr_defragment_thread_active = false;
|
||||
os_thread_exit();
|
||||
OS_THREAD_DUMMY_RETURN;
|
||||
|
|
|
@ -28,6 +28,7 @@ Created 11/11/1995 Heikki Tuuri
|
|||
#include "ha_prototypes.h"
|
||||
#include <mysql/service_thd_wait.h>
|
||||
#include <my_dbug.h>
|
||||
#include <sql_class.h>
|
||||
|
||||
#include "buf0flu.h"
|
||||
#include "buf0buf.h"
|
||||
|
@ -2690,21 +2691,21 @@ than a second
|
|||
@retval OS_SYNC_TIME_EXCEEDED if timeout was exceeded
|
||||
@param next_loop_time time when next loop iteration should start
|
||||
@param sig_count zero or the value returned by previous call of
|
||||
os_event_reset() */
|
||||
os_event_reset()
|
||||
@param cur_time current time as in ut_time_ms() */
|
||||
static
|
||||
ulint
|
||||
pc_sleep_if_needed(
|
||||
/*===============*/
|
||||
ulint next_loop_time,
|
||||
int64_t sig_count)
|
||||
int64_t sig_count,
|
||||
ulint cur_time)
|
||||
{
|
||||
/* No sleep if we are cleaning the buffer pool during the shutdown
|
||||
with everything else finished */
|
||||
if (srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE)
|
||||
return OS_SYNC_TIME_EXCEEDED;
|
||||
|
||||
ulint cur_time = ut_time_ms();
|
||||
|
||||
if (next_loop_time > cur_time) {
|
||||
/* Get sleep interval in micro seconds. We use
|
||||
ut_min() to avoid long sleep in case of wrap around. */
|
||||
|
@ -3200,6 +3201,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(void*)
|
|||
ulint last_pages = 0;
|
||||
|
||||
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
|
||||
ulint curr_time = ut_time_ms();
|
||||
|
||||
/* The page_cleaner skips sleep if the server is
|
||||
idle and there are no pending IOs in the buffer pool
|
||||
|
@ -3209,23 +3211,22 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(void*)
|
|||
|| n_flushed == 0) {
|
||||
|
||||
ret_sleep = pc_sleep_if_needed(
|
||||
next_loop_time, sig_count);
|
||||
|
||||
if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
|
||||
break;
|
||||
}
|
||||
} else if (ut_time_ms() > next_loop_time) {
|
||||
next_loop_time, sig_count, curr_time);
|
||||
} else if (curr_time > next_loop_time) {
|
||||
ret_sleep = OS_SYNC_TIME_EXCEEDED;
|
||||
} else {
|
||||
ret_sleep = 0;
|
||||
}
|
||||
|
||||
if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
|
||||
break;
|
||||
}
|
||||
|
||||
sig_count = os_event_reset(buf_flush_event);
|
||||
|
||||
if (ret_sleep == OS_SYNC_TIME_EXCEEDED) {
|
||||
ulint curr_time = ut_time_ms();
|
||||
|
||||
if (curr_time > next_loop_time + 3000
|
||||
if (global_system_variables.log_warnings > 2
|
||||
&& curr_time > next_loop_time + 3000
|
||||
&& !(test_flags & TEST_SIGINT)) {
|
||||
if (warn_count == 0) {
|
||||
ib::info() << "page_cleaner: 1000ms"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2016, MariaDB Corporation. All Rights Reserved.
|
||||
Copyright (c) 2016, 2017, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -29,6 +29,7 @@ Created 25/08/2016 Jan Lindström
|
|||
#include "dict0defrag_bg.h"
|
||||
#include "row0mysql.h"
|
||||
#include "srv0start.h"
|
||||
#include "trx0roll.h"
|
||||
#include "ut0new.h"
|
||||
|
||||
#include <vector>
|
||||
|
@ -201,17 +202,15 @@ dict_stats_defrag_pool_del(
|
|||
mutex_exit(&defrag_pool_mutex);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
Get the first index that has been added for updating persistent defrag
|
||||
stats and eventually save its stats. */
|
||||
/** Get the first index that has been added for updating persistent defrag
|
||||
stats and eventually save its stats.
|
||||
@param[in,out] trx transaction that will be started and committed */
|
||||
static
|
||||
void
|
||||
dict_stats_process_entry_from_defrag_pool()
|
||||
/*=======================================*/
|
||||
dict_stats_process_entry_from_defrag_pool(trx_t* trx)
|
||||
{
|
||||
table_id_t table_id;
|
||||
index_id_t index_id;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
|
@ -230,96 +229,70 @@ dict_stats_process_entry_from_defrag_pool()
|
|||
table = dict_table_open_on_id(table_id, TRUE,
|
||||
DICT_TABLE_OP_OPEN_ONLY_IF_CACHED);
|
||||
|
||||
if (table == NULL) {
|
||||
dict_index_t* index = table && !table->corrupted
|
||||
? dict_table_find_index_on_id(table, index_id)
|
||||
: NULL;
|
||||
|
||||
if (!index || dict_index_is_corrupted(index)) {
|
||||
if (table) {
|
||||
dict_table_close(table, TRUE, FALSE);
|
||||
}
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check whether table is corrupted */
|
||||
if (table->corrupted) {
|
||||
dict_table_close(table, TRUE, FALSE);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
return;
|
||||
}
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
dict_index_t* index = dict_table_find_index_on_id(table, index_id);
|
||||
|
||||
if (index == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check whether index is corrupted */
|
||||
if (dict_index_is_corrupted(index)) {
|
||||
dict_table_close(table, FALSE, FALSE);
|
||||
return;
|
||||
}
|
||||
|
||||
err = dict_stats_save_defrag_stats(index);
|
||||
++trx->will_lock;
|
||||
dberr_t err = dict_stats_save_defrag_stats(index, trx);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
trx_rollback_to_savepoint(trx, NULL);
|
||||
ib::error() << "Saving defragmentation status for table "
|
||||
<< index->table->name.m_name
|
||||
<< " index " << index->name()
|
||||
<< index->table->name
|
||||
<< " index " << index->name
|
||||
<< " failed " << err;
|
||||
} else if (trx->state != TRX_STATE_NOT_STARTED) {
|
||||
trx_commit_for_mysql(trx);
|
||||
}
|
||||
|
||||
dict_table_close(table, FALSE, FALSE);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
Get the first index that has been added for updating persistent defrag
|
||||
stats and eventually save its stats. */
|
||||
/** Process indexes that have been scheduled for defragmenting.
|
||||
@param[in,out] trx transaction that will be started and committed */
|
||||
void
|
||||
dict_defrag_process_entries_from_defrag_pool()
|
||||
/*==========================================*/
|
||||
dict_defrag_process_entries_from_defrag_pool(trx_t* trx)
|
||||
{
|
||||
while (defrag_pool->size() && !dict_stats_start_shutdown) {
|
||||
dict_stats_process_entry_from_defrag_pool();
|
||||
dict_stats_process_entry_from_defrag_pool(trx);
|
||||
}
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation result.
|
||||
/** Save defragmentation result.
|
||||
@param[in] index index that was defragmented
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_defrag_summary(
|
||||
/*============================*/
|
||||
dict_index_t* index) /*!< in: index */
|
||||
dict_stats_save_defrag_summary(dict_index_t* index, trx_t* trx)
|
||||
{
|
||||
dberr_t ret=DB_SUCCESS;
|
||||
lint now = (lint) ut_time();
|
||||
|
||||
if (dict_index_is_ibuf(index)) {
|
||||
return DB_SUCCESS;
|
||||
}
|
||||
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
ret = dict_stats_save_index_stat(index, now, "n_pages_freed",
|
||||
index->stat_defrag_n_pages_freed,
|
||||
NULL,
|
||||
"Number of pages freed during"
|
||||
" last defragmentation run.",
|
||||
NULL);
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
|
||||
return (ret);
|
||||
return dict_stats_save_index_stat(index, ut_time(), "n_pages_freed",
|
||||
index->stat_defrag_n_pages_freed,
|
||||
NULL,
|
||||
"Number of pages freed during"
|
||||
" last defragmentation run.",
|
||||
trx);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation stats for a given index.
|
||||
/** Save defragmentation stats for a given index.
|
||||
@param[in] index index that is being defragmented
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_defrag_stats(
|
||||
/*============================*/
|
||||
dict_index_t* index) /*!< in: index */
|
||||
dict_stats_save_defrag_stats(dict_index_t* index, trx_t* trx)
|
||||
{
|
||||
dberr_t ret;
|
||||
|
||||
if (dict_index_is_ibuf(index)) {
|
||||
return DB_SUCCESS;
|
||||
}
|
||||
|
@ -328,7 +301,6 @@ dict_stats_save_defrag_stats(
|
|||
return dict_stats_report_error(index->table, true);
|
||||
}
|
||||
|
||||
lint now = (lint) ut_time();
|
||||
mtr_t mtr;
|
||||
ulint n_leaf_pages;
|
||||
ulint n_leaf_reserved;
|
||||
|
@ -345,40 +317,33 @@ dict_stats_save_defrag_stats(
|
|||
return DB_SUCCESS;
|
||||
}
|
||||
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
ret = dict_stats_save_index_stat(index, now, "n_page_split",
|
||||
index->stat_defrag_n_page_split,
|
||||
NULL,
|
||||
"Number of new page splits on leaves"
|
||||
" since last defragmentation.",
|
||||
NULL);
|
||||
if (ret != DB_SUCCESS) {
|
||||
goto end;
|
||||
lint now = ut_time();
|
||||
dberr_t err = dict_stats_save_index_stat(
|
||||
index, now, "n_page_split",
|
||||
index->stat_defrag_n_page_split,
|
||||
NULL,
|
||||
"Number of new page splits on leaves"
|
||||
" since last defragmentation.",
|
||||
trx);
|
||||
if (err == DB_SUCCESS) {
|
||||
err = dict_stats_save_index_stat(
|
||||
index, now, "n_leaf_pages_defrag",
|
||||
n_leaf_pages,
|
||||
NULL,
|
||||
"Number of leaf pages when this stat is saved to disk",
|
||||
trx);
|
||||
}
|
||||
|
||||
ret = dict_stats_save_index_stat(
|
||||
index, now, "n_leaf_pages_defrag",
|
||||
n_leaf_pages,
|
||||
NULL,
|
||||
"Number of leaf pages when this stat is saved to disk",
|
||||
NULL);
|
||||
if (ret != DB_SUCCESS) {
|
||||
goto end;
|
||||
if (err == DB_SUCCESS) {
|
||||
err = dict_stats_save_index_stat(
|
||||
index, now, "n_leaf_pages_reserved",
|
||||
n_leaf_reserved,
|
||||
NULL,
|
||||
"Number of pages reserved for this "
|
||||
"index leaves when this stat "
|
||||
"is saved to disk",
|
||||
trx);
|
||||
}
|
||||
|
||||
ret = dict_stats_save_index_stat(
|
||||
index, now, "n_leaf_pages_reserved",
|
||||
n_leaf_reserved,
|
||||
NULL,
|
||||
"Number of pages reserved for this index leaves when this stat "
|
||||
"is saved to disk",
|
||||
NULL);
|
||||
|
||||
end:
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
|
||||
return (ret);
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2009, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2009, 2017, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2015, 2017, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
|
@ -276,9 +276,7 @@ This function will free the pinfo object.
|
|||
@param[in,out] pinfo pinfo to pass to que_eval_sql() must already
|
||||
have any literals bound to it
|
||||
@param[in] sql SQL string to execute
|
||||
@param[in,out] trx in case of NULL the function will allocate and
|
||||
free the trx object. If it is not NULL then it will be rolled back
|
||||
only in the case of error, but not freed.
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
static
|
||||
dberr_t
|
||||
|
@ -288,56 +286,12 @@ dict_stats_exec_sql(
|
|||
trx_t* trx)
|
||||
{
|
||||
dberr_t err;
|
||||
bool trx_started = false;
|
||||
|
||||
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
|
||||
extern bool dict_stats_start_shutdown;
|
||||
|
||||
if (dict_stats_start_shutdown
|
||||
|| !dict_stats_persistent_storage_check(true)) {
|
||||
pars_info_free(pinfo);
|
||||
return(DB_STATS_DO_NOT_EXIST);
|
||||
}
|
||||
|
||||
if (trx == NULL) {
|
||||
trx = trx_allocate_for_background();
|
||||
trx_started = true;
|
||||
|
||||
if (srv_read_only_mode) {
|
||||
trx_start_internal_read_only(trx);
|
||||
} else {
|
||||
trx_start_internal(trx);
|
||||
}
|
||||
}
|
||||
|
||||
err = que_eval_sql(pinfo, sql, FALSE, trx); /* pinfo is freed here */
|
||||
|
||||
DBUG_EXECUTE_IF("stats_index_error",
|
||||
if (!trx_started) {
|
||||
err = DB_STATS_DO_NOT_EXIST;
|
||||
trx->error_state = DB_STATS_DO_NOT_EXIST;
|
||||
});
|
||||
|
||||
if (!trx_started && err == DB_SUCCESS) {
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
if (err == DB_SUCCESS) {
|
||||
trx_commit_for_mysql(trx);
|
||||
} else {
|
||||
trx->op_info = "rollback of internal trx on stats tables";
|
||||
trx->dict_operation_lock_mode = RW_X_LATCH;
|
||||
trx_rollback_to_savepoint(trx, NULL);
|
||||
trx->dict_operation_lock_mode = 0;
|
||||
trx->op_info = "";
|
||||
ut_a(trx->error_state == DB_SUCCESS);
|
||||
}
|
||||
|
||||
if (trx_started) {
|
||||
trx_free_for_background(trx);
|
||||
}
|
||||
trx->error_state = DB_STATS_DO_NOT_EXIST;);
|
||||
|
||||
return(err);
|
||||
}
|
||||
|
@ -546,39 +500,12 @@ dict_stats_empty_index(
|
|||
}
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
Clear defragmentation summary. */
|
||||
UNIV_INTERN
|
||||
/** Reset the table and index statsistics, corresponding to an empty table.
|
||||
@param[in,out] table table whose statistics are to be reset
|
||||
@param[in] empty_defrag_stats whether to empty the defrag statistics
|
||||
*/
|
||||
void
|
||||
dict_stats_empty_defrag_summary(
|
||||
/*==================*/
|
||||
dict_index_t* index) /*!< in: index to clear defragmentation stats */
|
||||
{
|
||||
index->stat_defrag_n_pages_freed = 0;
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
Clear defragmentation related index stats. */
|
||||
UNIV_INTERN
|
||||
void
|
||||
dict_stats_empty_defrag_stats(
|
||||
/*==================*/
|
||||
dict_index_t* index) /*!< in: index to clear defragmentation stats */
|
||||
{
|
||||
index->stat_defrag_modified_counter = 0;
|
||||
index->stat_defrag_n_page_split = 0;
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Write all zeros (or 1 where it makes sense) into a table and its indexes'
|
||||
statistics members. The resulting stats correspond to an empty table. */
|
||||
static
|
||||
void
|
||||
dict_stats_empty_table(
|
||||
/*===================*/
|
||||
dict_table_t* table, /*!< in/out: table */
|
||||
bool empty_defrag_stats)
|
||||
/*!< in: whether to empty defrag stats */
|
||||
dict_stats_empty_table(dict_table_t* table, bool empty_defrag_stats)
|
||||
{
|
||||
/* Zero the stats members */
|
||||
|
||||
|
@ -2324,9 +2251,7 @@ storage.
|
|||
@param[in] stat_value value of the stat
|
||||
@param[in] sample_size n pages sampled or NULL
|
||||
@param[in] stat_description description of the stat
|
||||
@param[in,out] trx in case of NULL the function will
|
||||
allocate and free the trx object. If it is not NULL then it will be
|
||||
rolled back only in the case of error, but not freed.
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_index_stat(
|
||||
|
@ -2343,8 +2268,7 @@ dict_stats_save_index_stat(
|
|||
char db_utf8[MAX_DB_UTF8_LEN];
|
||||
char table_utf8[MAX_TABLE_UTF8_LEN];
|
||||
|
||||
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
ut_ad(trx->persistent_stats || trx->in_mysql_trx_list);
|
||||
|
||||
dict_fs2utf8(index->table->name.m_name, db_utf8, sizeof(db_utf8),
|
||||
table_utf8, sizeof(table_utf8));
|
||||
|
@ -2370,6 +2294,8 @@ dict_stats_save_index_stat(
|
|||
pars_info_add_str_literal(pinfo, "stat_description",
|
||||
stat_description);
|
||||
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
ret = dict_stats_exec_sql(
|
||||
pinfo,
|
||||
"PROCEDURE INDEX_STATS_SAVE () IS\n"
|
||||
|
@ -2396,6 +2322,8 @@ dict_stats_save_index_stat(
|
|||
");\n"
|
||||
"END;", trx);
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
if (innodb_index_stats_not_found == false &&
|
||||
index->stats_error_printed == false) {
|
||||
|
@ -2448,6 +2376,7 @@ dict_stats_report_error(dict_table_t* table, bool defragment)
|
|||
|
||||
/** Save the table's statistics into the persistent statistics storage.
|
||||
@param[in] table_orig table whose stats to save
|
||||
@param[in,out] trx transaction
|
||||
@param[in] only_for_index if this is non-NULL, then stats for indexes
|
||||
that are not equal to it will not be saved, if NULL, then all indexes' stats
|
||||
are saved
|
||||
|
@ -2456,7 +2385,8 @@ static
|
|||
dberr_t
|
||||
dict_stats_save(
|
||||
dict_table_t* table_orig,
|
||||
const index_id_t* only_for_index)
|
||||
trx_t* trx,
|
||||
const index_id_t* only_for_index = NULL)
|
||||
{
|
||||
pars_info_t* pinfo;
|
||||
lint now;
|
||||
|
@ -2465,6 +2395,8 @@ dict_stats_save(
|
|||
char db_utf8[MAX_DB_UTF8_LEN];
|
||||
char table_utf8[MAX_TABLE_UTF8_LEN];
|
||||
|
||||
ut_ad(trx->persistent_stats || trx->in_mysql_trx_list);
|
||||
|
||||
if (table_orig->is_readable()) {
|
||||
} else {
|
||||
return (dict_stats_report_error(table_orig));
|
||||
|
@ -2475,9 +2407,6 @@ dict_stats_save(
|
|||
dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8),
|
||||
table_utf8, sizeof(table_utf8));
|
||||
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
/* MySQL's timestamp is 4 byte, so we use
|
||||
pars_info_add_int4_literal() which takes a lint arg, so "now" is
|
||||
lint */
|
||||
|
@ -2494,6 +2423,8 @@ dict_stats_save(
|
|||
pars_info_add_ull_literal(pinfo, "sum_of_other_index_sizes",
|
||||
table->stat_sum_of_other_index_sizes);
|
||||
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
ret = dict_stats_exec_sql(
|
||||
pinfo,
|
||||
"PROCEDURE TABLE_STATS_SAVE () IS\n"
|
||||
|
@ -2514,33 +2445,18 @@ dict_stats_save(
|
|||
":clustered_index_size,\n"
|
||||
":sum_of_other_index_sizes\n"
|
||||
");\n"
|
||||
"END;", NULL);
|
||||
"END;", trx);
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
ib::error() << "Cannot save table statistics for table "
|
||||
<< table->name << ": " << ut_strerr(ret);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
|
||||
dict_stats_snapshot_free(table);
|
||||
|
||||
return(ret);
|
||||
}
|
||||
|
||||
trx_t* trx = trx_allocate_for_background();
|
||||
|
||||
if (srv_read_only_mode) {
|
||||
trx_start_internal_read_only(trx);
|
||||
} else {
|
||||
trx_start_internal(trx);
|
||||
}
|
||||
|
||||
dict_index_t* index;
|
||||
index_map_t indexes(
|
||||
(ut_strcmp_functor()),
|
||||
index_map_t_allocator(mem_key_dict_stats_index_map_t));
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Below we do all the modifications in innodb_index_stats in a single
|
||||
transaction for performance reasons. Modifying more than one row in a
|
||||
single transaction may deadlock with other transactions if they
|
||||
|
@ -2553,18 +2469,17 @@ dict_stats_save(
|
|||
stat_name). This is why below we sort the indexes by name and then
|
||||
for each index, do the mods ordered by stat_name. */
|
||||
|
||||
for (index = dict_table_get_first_index(table);
|
||||
for (dict_index_t* index = dict_table_get_first_index(table);
|
||||
index != NULL;
|
||||
index = dict_table_get_next_index(index)) {
|
||||
|
||||
indexes[index->name] = index;
|
||||
}
|
||||
|
||||
index_map_t::const_iterator it;
|
||||
for (index_map_t::const_iterator it = indexes.begin();
|
||||
it != indexes.end(); ++it) {
|
||||
|
||||
for (it = indexes.begin(); it != indexes.end(); ++it) {
|
||||
|
||||
index = it->second;
|
||||
dict_index_t* index = it->second;
|
||||
|
||||
if (only_for_index != NULL && index->id != *only_for_index) {
|
||||
continue;
|
||||
|
@ -2627,13 +2542,11 @@ dict_stats_save(
|
|||
}
|
||||
}
|
||||
|
||||
trx_commit_for_mysql(trx);
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
end:
|
||||
trx_free_for_background(trx);
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
ib::error() << "Cannot save table statistics for table "
|
||||
<< table->name << ": " << ut_strerr(ret);
|
||||
}
|
||||
|
||||
dict_stats_snapshot_free(table);
|
||||
|
||||
|
@ -3131,12 +3044,13 @@ dict_stats_empty_defrag_modified_counter(
|
|||
}
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Fetches or calculates new estimates for index statistics. */
|
||||
void
|
||||
dict_stats_update_for_index(
|
||||
/*========================*/
|
||||
dict_index_t* index) /*!< in/out: index */
|
||||
/** Calculate index statistics.
|
||||
@param[in,out] index index tree
|
||||
@param[in,out] trx transaction (for persistent statistics)
|
||||
@return DB_SUCCESS or error code */
|
||||
UNIV_INTERN
|
||||
dberr_t
|
||||
dict_stats_update_for_index(dict_index_t* index, trx_t* trx)
|
||||
{
|
||||
DBUG_ENTER("dict_stats_update_for_index");
|
||||
|
||||
|
@ -3148,8 +3062,8 @@ dict_stats_update_for_index(
|
|||
dict_table_stats_lock(index->table, RW_X_LATCH);
|
||||
dict_stats_analyze_index(index);
|
||||
dict_table_stats_unlock(index->table, RW_X_LATCH);
|
||||
dict_stats_save(index->table, &index->id);
|
||||
DBUG_VOID_RETURN;
|
||||
DBUG_RETURN(dict_stats_save(index->table, trx,
|
||||
&index->id));
|
||||
}
|
||||
/* else */
|
||||
|
||||
|
@ -3172,22 +3086,20 @@ dict_stats_update_for_index(
|
|||
dict_stats_update_transient_for_index(index);
|
||||
dict_table_stats_unlock(index->table, RW_X_LATCH);
|
||||
|
||||
DBUG_VOID_RETURN;
|
||||
DBUG_RETURN(DB_SUCCESS);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Calculates new estimates for table and index statistics. The statistics
|
||||
are used in query optimization.
|
||||
@return DB_SUCCESS or error code */
|
||||
/** Calculate new estimates for table and index statistics.
|
||||
@param[in,out] table table
|
||||
@param[in] stats_upd_option how to update statistics
|
||||
@param[in,out] trx transaction
|
||||
@return DB_* error code or DB_SUCCESS */
|
||||
UNIV_INTERN
|
||||
dberr_t
|
||||
dict_stats_update(
|
||||
/*==============*/
|
||||
dict_table_t* table, /*!< in/out: table */
|
||||
dict_stats_upd_option_t stats_upd_option)
|
||||
/*!< in: whether to (re) calc
|
||||
the stats or to fetch them from
|
||||
the persistent statistics
|
||||
storage */
|
||||
dict_table_t* table,
|
||||
dict_stats_upd_option_t stats_upd_option,
|
||||
trx_t* trx)
|
||||
{
|
||||
ut_ad(!mutex_own(&dict_sys->mutex));
|
||||
|
||||
|
@ -3232,7 +3144,7 @@ dict_stats_update(
|
|||
return(err);
|
||||
}
|
||||
|
||||
err = dict_stats_save(table, NULL);
|
||||
err = dict_stats_save(table, trx);
|
||||
|
||||
return(err);
|
||||
}
|
||||
|
@ -3268,7 +3180,7 @@ dict_stats_update(
|
|||
|
||||
if (dict_stats_persistent_storage_check(false)) {
|
||||
|
||||
return(dict_stats_save(table, NULL));
|
||||
return(dict_stats_save(table, trx));
|
||||
}
|
||||
|
||||
return(DB_STATS_DO_NOT_EXIST);
|
||||
|
@ -3347,9 +3259,9 @@ dict_stats_update(
|
|||
}
|
||||
|
||||
if (dict_stats_auto_recalc_is_enabled(table)) {
|
||||
return(dict_stats_update(
|
||||
table,
|
||||
DICT_STATS_RECALC_PERSISTENT));
|
||||
return dict_stats_update(
|
||||
table, DICT_STATS_RECALC_PERSISTENT,
|
||||
trx);
|
||||
}
|
||||
|
||||
ib::info() << "Trying to use table " << table->name
|
||||
|
@ -3397,25 +3309,20 @@ transient:
|
|||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Removes the information for a particular index's stats from the persistent
|
||||
storage if it exists and if there is data stored for this index.
|
||||
This function creates its own trx and commits it.
|
||||
A note from Marko why we cannot edit user and sys_* tables in one trx:
|
||||
marko: The problem is that ibuf merges should be disabled while we are
|
||||
rolling back dict transactions.
|
||||
marko: If ibuf merges are not disabled, we need to scan the *.ibd files.
|
||||
But we shouldn't open *.ibd files before we have rolled back dict
|
||||
transactions and opened the SYS_* records for the *.ibd files.
|
||||
/** Remove the persistent statistics for an index.
|
||||
@param[in] db_and_table schema and table name, e.g., 'db/table'
|
||||
@param[in] iname index name
|
||||
@param[out] errstr error message (when not returning DB_SUCCESS)
|
||||
@param[in] errstr_sz sizeof errstr
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_drop_index(
|
||||
/*==================*/
|
||||
const char* db_and_table,/*!< in: db and table, e.g. 'db/table' */
|
||||
const char* iname, /*!< in: index name */
|
||||
char* errstr, /*!< out: error message if != DB_SUCCESS
|
||||
is returned */
|
||||
ulint errstr_sz)/*!< in: size of the errstr buffer */
|
||||
const char* db_and_table,
|
||||
const char* iname,
|
||||
char* errstr,
|
||||
size_t errstr_sz,
|
||||
trx_t* trx)
|
||||
{
|
||||
char db_utf8[MAX_DB_UTF8_LEN];
|
||||
char table_utf8[MAX_TABLE_UTF8_LEN];
|
||||
|
@ -3431,6 +3338,11 @@ dict_stats_drop_index(
|
|||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
if (!dict_stats_persistent_storage_check(false)) {
|
||||
/* Statistics tables do not exist. */
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
dict_fs2utf8(db_and_table, db_utf8, sizeof(db_utf8),
|
||||
table_utf8, sizeof(table_utf8));
|
||||
|
||||
|
@ -3442,7 +3354,6 @@ dict_stats_drop_index(
|
|||
|
||||
pars_info_add_str_literal(pinfo, "index_name", iname);
|
||||
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
ret = dict_stats_exec_sql(
|
||||
|
@ -3453,16 +3364,18 @@ dict_stats_drop_index(
|
|||
"database_name = :database_name AND\n"
|
||||
"table_name = :table_name AND\n"
|
||||
"index_name = :index_name;\n"
|
||||
"END;\n", NULL);
|
||||
"END;\n", trx);
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
|
||||
if (ret == DB_STATS_DO_NOT_EXIST) {
|
||||
ret = DB_SUCCESS;
|
||||
}
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
switch (ret) {
|
||||
case DB_STATS_DO_NOT_EXIST:
|
||||
case DB_SUCCESS:
|
||||
return(DB_SUCCESS);
|
||||
case DB_QUE_THR_SUSPENDED:
|
||||
ret = DB_LOCK_WAIT;
|
||||
/* fall through */
|
||||
default:
|
||||
snprintf(errstr, errstr_sz,
|
||||
"Unable to delete statistics for index %s"
|
||||
" from %s%s: %s. They can be deleted later using"
|
||||
|
@ -3488,98 +3401,71 @@ dict_stats_drop_index(
|
|||
return(ret);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Executes
|
||||
DELETE FROM mysql.innodb_table_stats
|
||||
WHERE database_name = '...' AND table_name = '...';
|
||||
Creates its own transaction and commits it.
|
||||
/** Delete table statistics.
|
||||
@param[in] db schema name
|
||||
@param[in] t table name
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
UNIV_INLINE
|
||||
dberr_t
|
||||
dict_stats_delete_from_table_stats(
|
||||
/*===============================*/
|
||||
const char* database_name, /*!< in: database name, e.g. 'db' */
|
||||
const char* table_name) /*!< in: table name, e.g. 'table' */
|
||||
dict_stats_delete_from_table_stats(const char* db, const char* t, trx_t* trx)
|
||||
{
|
||||
pars_info_t* pinfo;
|
||||
dberr_t ret;
|
||||
pars_info_t* pinfo = pars_info_create();
|
||||
|
||||
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
pars_info_add_str_literal(pinfo, "database_name", db);
|
||||
pars_info_add_str_literal(pinfo, "table_name", t);
|
||||
|
||||
pinfo = pars_info_create();
|
||||
|
||||
pars_info_add_str_literal(pinfo, "database_name", database_name);
|
||||
pars_info_add_str_literal(pinfo, "table_name", table_name);
|
||||
|
||||
ret = dict_stats_exec_sql(
|
||||
return dict_stats_exec_sql(
|
||||
pinfo,
|
||||
"PROCEDURE DELETE_FROM_TABLE_STATS () IS\n"
|
||||
"BEGIN\n"
|
||||
"DELETE FROM \"" TABLE_STATS_NAME "\" WHERE\n"
|
||||
"database_name = :database_name AND\n"
|
||||
"table_name = :table_name;\n"
|
||||
"END;\n", NULL);
|
||||
|
||||
return(ret);
|
||||
"END;\n", trx);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Executes
|
||||
DELETE FROM mysql.innodb_index_stats
|
||||
WHERE database_name = '...' AND table_name = '...';
|
||||
Creates its own transaction and commits it.
|
||||
/** Delete index statistics.
|
||||
@param[in] db schema name
|
||||
@param[in] t table name
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
UNIV_INLINE
|
||||
dberr_t
|
||||
dict_stats_delete_from_index_stats(
|
||||
/*===============================*/
|
||||
const char* database_name, /*!< in: database name, e.g. 'db' */
|
||||
const char* table_name) /*!< in: table name, e.g. 'table' */
|
||||
dict_stats_delete_from_index_stats(const char* db, const char* t, trx_t* trx)
|
||||
{
|
||||
pars_info_t* pinfo;
|
||||
dberr_t ret;
|
||||
pars_info_t* pinfo = pars_info_create();
|
||||
|
||||
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
pars_info_add_str_literal(pinfo, "database_name", db);
|
||||
pars_info_add_str_literal(pinfo, "table_name", t);
|
||||
|
||||
pinfo = pars_info_create();
|
||||
|
||||
pars_info_add_str_literal(pinfo, "database_name", database_name);
|
||||
pars_info_add_str_literal(pinfo, "table_name", table_name);
|
||||
|
||||
ret = dict_stats_exec_sql(
|
||||
return dict_stats_exec_sql(
|
||||
pinfo,
|
||||
"PROCEDURE DELETE_FROM_INDEX_STATS () IS\n"
|
||||
"BEGIN\n"
|
||||
"DELETE FROM \"" INDEX_STATS_NAME "\" WHERE\n"
|
||||
"database_name = :database_name AND\n"
|
||||
"table_name = :table_name;\n"
|
||||
"END;\n", NULL);
|
||||
|
||||
return(ret);
|
||||
"END;\n", trx);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Removes the statistics for a table and all of its indexes from the
|
||||
persistent statistics storage if it exists and if there is data stored for
|
||||
the table. This function creates its own transaction and commits it.
|
||||
/** Remove the persistent statistics for a table and all of its indexes.
|
||||
@param[in] db_and_table schema and table name, e.g., 'db/table'
|
||||
@param[out] errstr error message (when not returning DB_SUCCESS)
|
||||
@param[in] errstr_sz sizeof errstr
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_drop_table(
|
||||
/*==================*/
|
||||
const char* db_and_table, /*!< in: db and table, e.g. 'db/table' */
|
||||
char* errstr, /*!< out: error message
|
||||
if != DB_SUCCESS is returned */
|
||||
ulint errstr_sz) /*!< in: size of errstr buffer */
|
||||
const char* db_and_table,
|
||||
char* errstr,
|
||||
size_t errstr_sz,
|
||||
trx_t* trx)
|
||||
{
|
||||
char db_utf8[MAX_DB_UTF8_LEN];
|
||||
char table_utf8[MAX_TABLE_UTF8_LEN];
|
||||
dberr_t ret;
|
||||
|
||||
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
|
||||
/* skip tables that do not contain a database name
|
||||
e.g. if we are dropping SYS_TABLES */
|
||||
if (strchr(db_and_table, '/') == NULL) {
|
||||
|
@ -3594,24 +3480,32 @@ dict_stats_drop_table(
|
|||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
if (!dict_stats_persistent_storage_check(true)) {
|
||||
/* Statistics tables do not exist. */
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
dict_fs2utf8(db_and_table, db_utf8, sizeof(db_utf8),
|
||||
table_utf8, sizeof(table_utf8));
|
||||
|
||||
ret = dict_stats_delete_from_table_stats(db_utf8, table_utf8);
|
||||
ret = dict_stats_delete_from_table_stats(db_utf8, table_utf8, trx);
|
||||
|
||||
if (ret == DB_SUCCESS) {
|
||||
ret = dict_stats_delete_from_index_stats(db_utf8, table_utf8);
|
||||
ret = dict_stats_delete_from_index_stats(
|
||||
db_utf8, table_utf8, trx);
|
||||
}
|
||||
|
||||
if (ret == DB_STATS_DO_NOT_EXIST) {
|
||||
ret = DB_SUCCESS;
|
||||
}
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
|
||||
switch (ret) {
|
||||
case DB_SUCCESS:
|
||||
case DB_STATS_DO_NOT_EXIST:
|
||||
return(DB_SUCCESS);
|
||||
case DB_QUE_THR_SUSPENDED:
|
||||
ret = DB_LOCK_WAIT;
|
||||
/* fall through */
|
||||
default:
|
||||
snprintf(errstr, errstr_sz,
|
||||
"Unable to delete statistics for table %s.%s: %s."
|
||||
" They can be deleted later using"
|
||||
"Unable to delete statistics for table %s.%s: %s. "
|
||||
"They can be deleted later using "
|
||||
|
||||
" DELETE FROM %s WHERE"
|
||||
" database_name = '%s' AND"
|
||||
|
@ -3634,36 +3528,30 @@ dict_stats_drop_table(
|
|||
return(ret);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Executes
|
||||
UPDATE mysql.innodb_table_stats SET
|
||||
database_name = '...', table_name = '...'
|
||||
WHERE database_name = '...' AND table_name = '...';
|
||||
Creates its own transaction and commits it.
|
||||
/** Rename table statistics.
|
||||
@param[in] old_dbname_utf8 old schema name
|
||||
@param[in] old_tablename_utf8 old table name
|
||||
@param[in] new_dbname_utf8 new schema name
|
||||
@param[in] new_tablename_utf8 new schema name
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
UNIV_INLINE
|
||||
dberr_t
|
||||
dict_stats_rename_table_in_table_stats(
|
||||
/*===================================*/
|
||||
const char* old_dbname_utf8,/*!< in: database name, e.g. 'olddb' */
|
||||
const char* old_tablename_utf8,/*!< in: table name, e.g. 'oldtable' */
|
||||
const char* new_dbname_utf8,/*!< in: database name, e.g. 'newdb' */
|
||||
const char* new_tablename_utf8)/*!< in: table name, e.g. 'newtable' */
|
||||
dict_stats_rename_in_table_stats(
|
||||
const char* old_dbname_utf8,
|
||||
const char* old_tablename_utf8,
|
||||
const char* new_dbname_utf8,
|
||||
const char* new_tablename_utf8,
|
||||
trx_t* trx)
|
||||
{
|
||||
pars_info_t* pinfo;
|
||||
dberr_t ret;
|
||||
|
||||
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
|
||||
pinfo = pars_info_create();
|
||||
pars_info_t* pinfo = pars_info_create();
|
||||
|
||||
pars_info_add_str_literal(pinfo, "old_dbname_utf8", old_dbname_utf8);
|
||||
pars_info_add_str_literal(pinfo, "old_tablename_utf8", old_tablename_utf8);
|
||||
pars_info_add_str_literal(pinfo, "new_dbname_utf8", new_dbname_utf8);
|
||||
pars_info_add_str_literal(pinfo, "new_tablename_utf8", new_tablename_utf8);
|
||||
|
||||
ret = dict_stats_exec_sql(
|
||||
return dict_stats_exec_sql(
|
||||
pinfo,
|
||||
"PROCEDURE RENAME_TABLE_IN_TABLE_STATS () IS\n"
|
||||
"BEGIN\n"
|
||||
|
@ -3673,41 +3561,33 @@ dict_stats_rename_table_in_table_stats(
|
|||
"WHERE\n"
|
||||
"database_name = :old_dbname_utf8 AND\n"
|
||||
"table_name = :old_tablename_utf8;\n"
|
||||
"END;\n", NULL);
|
||||
|
||||
return(ret);
|
||||
"END;\n", trx);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Executes
|
||||
UPDATE mysql.innodb_index_stats SET
|
||||
database_name = '...', table_name = '...'
|
||||
WHERE database_name = '...' AND table_name = '...';
|
||||
Creates its own transaction and commits it.
|
||||
/** Rename index statistics.
|
||||
@param[in] old_dbname_utf8 old schema name
|
||||
@param[in] old_tablename_utf8 old table name
|
||||
@param[in] new_dbname_utf8 new schema name
|
||||
@param[in] new_tablename_utf8 new table name
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
UNIV_INLINE
|
||||
dberr_t
|
||||
dict_stats_rename_table_in_index_stats(
|
||||
/*===================================*/
|
||||
const char* old_dbname_utf8,/*!< in: database name, e.g. 'olddb' */
|
||||
const char* old_tablename_utf8,/*!< in: table name, e.g. 'oldtable' */
|
||||
const char* new_dbname_utf8,/*!< in: database name, e.g. 'newdb' */
|
||||
const char* new_tablename_utf8)/*!< in: table name, e.g. 'newtable' */
|
||||
dict_stats_rename_in_index_stats(
|
||||
const char* old_dbname_utf8,
|
||||
const char* old_tablename_utf8,
|
||||
const char* new_dbname_utf8,
|
||||
const char* new_tablename_utf8,
|
||||
trx_t* trx)
|
||||
{
|
||||
pars_info_t* pinfo;
|
||||
dberr_t ret;
|
||||
|
||||
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
|
||||
pinfo = pars_info_create();
|
||||
pars_info_t* pinfo = pars_info_create();
|
||||
|
||||
pars_info_add_str_literal(pinfo, "old_dbname_utf8", old_dbname_utf8);
|
||||
pars_info_add_str_literal(pinfo, "old_tablename_utf8", old_tablename_utf8);
|
||||
pars_info_add_str_literal(pinfo, "new_dbname_utf8", new_dbname_utf8);
|
||||
pars_info_add_str_literal(pinfo, "new_tablename_utf8", new_tablename_utf8);
|
||||
|
||||
ret = dict_stats_exec_sql(
|
||||
return dict_stats_exec_sql(
|
||||
pinfo,
|
||||
"PROCEDURE RENAME_TABLE_IN_INDEX_STATS () IS\n"
|
||||
"BEGIN\n"
|
||||
|
@ -3717,23 +3597,23 @@ dict_stats_rename_table_in_index_stats(
|
|||
"WHERE\n"
|
||||
"database_name = :old_dbname_utf8 AND\n"
|
||||
"table_name = :old_tablename_utf8;\n"
|
||||
"END;\n", NULL);
|
||||
|
||||
return(ret);
|
||||
"END;\n", trx);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Renames a table in InnoDB persistent stats storage.
|
||||
This function creates its own transaction and commits it.
|
||||
/** Rename a table in the InnoDB persistent statistics storage.
|
||||
@param[in] old_name old schema and table name, e.g., 'db/table'
|
||||
@param[in] new_name new schema and table name, e.g., 'db/table'
|
||||
@param[out] errstr error message (when not returning DB_SUCCESS)
|
||||
@param[in] errstr_sz sizeof errstr
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_rename_table(
|
||||
/*====================*/
|
||||
const char* old_name, /*!< in: old name, e.g. 'db/table' */
|
||||
const char* new_name, /*!< in: new name, e.g. 'db/table' */
|
||||
char* errstr, /*!< out: error string if != DB_SUCCESS
|
||||
is returned */
|
||||
size_t errstr_sz) /*!< in: errstr size */
|
||||
const char* old_name,
|
||||
const char* new_name,
|
||||
char* errstr,
|
||||
size_t errstr_sz,
|
||||
trx_t* trx)
|
||||
{
|
||||
char old_db_utf8[MAX_DB_UTF8_LEN];
|
||||
char new_db_utf8[MAX_DB_UTF8_LEN];
|
||||
|
@ -3741,9 +3621,6 @@ dict_stats_rename_table(
|
|||
char new_table_utf8[MAX_TABLE_UTF8_LEN];
|
||||
dberr_t ret;
|
||||
|
||||
ut_ad(!rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(!mutex_own(&dict_sys->mutex));
|
||||
|
||||
/* skip innodb_table_stats and innodb_index_stats themselves */
|
||||
if (strcmp(old_name, TABLE_STATS_NAME) == 0
|
||||
|| strcmp(old_name, INDEX_STATS_NAME) == 0
|
||||
|
@ -3753,104 +3630,95 @@ dict_stats_rename_table(
|
|||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
if (!dict_stats_persistent_storage_check(false)) {
|
||||
/* Statistics tables do not exist. */
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
dict_fs2utf8(old_name, old_db_utf8, sizeof(old_db_utf8),
|
||||
old_table_utf8, sizeof(old_table_utf8));
|
||||
|
||||
dict_fs2utf8(new_name, new_db_utf8, sizeof(new_db_utf8),
|
||||
new_table_utf8, sizeof(new_table_utf8));
|
||||
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
ulint n_attempts = 0;
|
||||
do {
|
||||
n_attempts++;
|
||||
trx_savept_t savept = trx_savept_take(trx);
|
||||
|
||||
ret = dict_stats_rename_table_in_table_stats(
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
ret = dict_stats_rename_in_table_stats(
|
||||
old_db_utf8, old_table_utf8,
|
||||
new_db_utf8, new_table_utf8);
|
||||
new_db_utf8, new_table_utf8, trx);
|
||||
|
||||
if (ret == DB_DUPLICATE_KEY) {
|
||||
dict_stats_delete_from_table_stats(
|
||||
new_db_utf8, new_table_utf8);
|
||||
}
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
if (ret == DB_STATS_DO_NOT_EXIST) {
|
||||
ret = DB_SUCCESS;
|
||||
}
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
os_thread_sleep(200000 /* 0.2 sec */);
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
switch (ret) {
|
||||
case DB_DUPLICATE_KEY:
|
||||
trx_rollback_to_savepoint(trx, &savept);
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
dict_stats_delete_from_table_stats(
|
||||
new_db_utf8, new_table_utf8, trx);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
/* fall through */
|
||||
case DB_DEADLOCK:
|
||||
case DB_LOCK_WAIT_TIMEOUT:
|
||||
os_thread_sleep(200000 /* 0.2 sec */);
|
||||
continue;
|
||||
case DB_STATS_DO_NOT_EXIST:
|
||||
ret = DB_SUCCESS;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} while ((ret == DB_DEADLOCK
|
||||
|| ret == DB_DUPLICATE_KEY
|
||||
|| ret == DB_LOCK_WAIT_TIMEOUT)
|
||||
&& n_attempts < 5);
|
||||
|
||||
break;
|
||||
} while (++n_attempts < 5);
|
||||
|
||||
const char* table_name = TABLE_STATS_NAME_PRINT;
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
snprintf(errstr, errstr_sz,
|
||||
"Unable to rename statistics from"
|
||||
" %s.%s to %s.%s in %s: %s."
|
||||
" They can be renamed later using"
|
||||
|
||||
" UPDATE %s SET"
|
||||
" database_name = '%s',"
|
||||
" table_name = '%s'"
|
||||
" WHERE"
|
||||
" database_name = '%s' AND"
|
||||
" table_name = '%s';",
|
||||
|
||||
old_db_utf8, old_table_utf8,
|
||||
new_db_utf8, new_table_utf8,
|
||||
TABLE_STATS_NAME_PRINT,
|
||||
ut_strerr(ret),
|
||||
|
||||
TABLE_STATS_NAME_PRINT,
|
||||
new_db_utf8, new_table_utf8,
|
||||
old_db_utf8, old_table_utf8);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
return(ret);
|
||||
goto err_exit;
|
||||
}
|
||||
/* else */
|
||||
|
||||
table_name = INDEX_STATS_NAME_PRINT;
|
||||
|
||||
n_attempts = 0;
|
||||
do {
|
||||
n_attempts++;
|
||||
trx_savept_t savept = trx_savept_take(trx);
|
||||
|
||||
ret = dict_stats_rename_table_in_index_stats(
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
ret = dict_stats_rename_in_index_stats(
|
||||
old_db_utf8, old_table_utf8,
|
||||
new_db_utf8, new_table_utf8);
|
||||
new_db_utf8, new_table_utf8, trx);
|
||||
|
||||
if (ret == DB_DUPLICATE_KEY) {
|
||||
dict_stats_delete_from_index_stats(
|
||||
new_db_utf8, new_table_utf8);
|
||||
}
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
if (ret == DB_STATS_DO_NOT_EXIST) {
|
||||
ret = DB_SUCCESS;
|
||||
}
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
os_thread_sleep(200000 /* 0.2 sec */);
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
switch (ret) {
|
||||
case DB_DUPLICATE_KEY:
|
||||
trx_rollback_to_savepoint(trx, &savept);
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
dict_stats_delete_from_index_stats(
|
||||
new_db_utf8, new_table_utf8, trx);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
/* fall through */
|
||||
case DB_DEADLOCK:
|
||||
case DB_LOCK_WAIT_TIMEOUT:
|
||||
os_thread_sleep(200000 /* 0.2 sec */);
|
||||
continue;
|
||||
case DB_STATS_DO_NOT_EXIST:
|
||||
ret = DB_SUCCESS;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} while ((ret == DB_DEADLOCK
|
||||
|| ret == DB_DUPLICATE_KEY
|
||||
|| ret == DB_LOCK_WAIT_TIMEOUT)
|
||||
&& n_attempts < 5);
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
break;
|
||||
} while (++n_attempts < 5);
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
err_exit:
|
||||
snprintf(errstr, errstr_sz,
|
||||
"Unable to rename statistics from"
|
||||
" %s.%s to %s.%s in %s: %s."
|
||||
|
@ -3865,10 +3733,10 @@ dict_stats_rename_table(
|
|||
|
||||
old_db_utf8, old_table_utf8,
|
||||
new_db_utf8, new_table_utf8,
|
||||
INDEX_STATS_NAME_PRINT,
|
||||
table_name,
|
||||
ut_strerr(ret),
|
||||
|
||||
INDEX_STATS_NAME_PRINT,
|
||||
table_name,
|
||||
new_db_utf8, new_table_utf8,
|
||||
old_db_utf8, old_table_utf8);
|
||||
}
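The rewritten rename path above retries a bounded number of times: on DB_DUPLICATE_KEY it rolls back to a savepoint and deletes the conflicting rows, on DB_DEADLOCK or DB_LOCK_WAIT_TIMEOUT it sleeps 0.2 s and retries, and DB_STATS_DO_NOT_EXIST is downgraded to success. Below is a minimal standalone sketch of that retry shape; it is not InnoDB code, and StatsResult, step and on_duplicate are hypothetical stand-ins for the real dict_stats_* calls.

// Standalone sketch of the bounded-retry shape used in dict_stats_rename_table();
// every name here is a hypothetical stand-in, not an InnoDB API.
#include <chrono>
#include <functional>
#include <thread>

enum class StatsResult {
	SUCCESS, DUPLICATE_KEY, DEADLOCK, LOCK_WAIT_TIMEOUT, DO_NOT_EXIST, OTHER_ERROR
};

StatsResult retry_stats_op(const std::function<StatsResult()>& step,
                           const std::function<void()>& on_duplicate)
{
	StatsResult ret = StatsResult::OTHER_ERROR;

	for (unsigned n_attempts = 0; n_attempts < 5; n_attempts++) {
		ret = step();

		switch (ret) {
		case StatsResult::DUPLICATE_KEY:
			/* The real code rolls back to a savepoint and deletes
			the conflicting rows before trying again. */
			on_duplicate();
			/* fall through */
		case StatsResult::DEADLOCK:
		case StatsResult::LOCK_WAIT_TIMEOUT:
			/* Back off for 0.2 s, as the original loop does. */
			std::this_thread::sleep_for(std::chrono::milliseconds(200));
			continue;
		case StatsResult::DO_NOT_EXIST:
			/* Missing statistics rows are not an error. */
			return StatsResult::SUCCESS;
		default:
			return ret;
		}
	}

	return ret;
}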
@@ -32,6 +32,7 @@ Created Apr 25, 2012 Vasil Dimov
#include "srv0start.h"
#include "ut0new.h"
#include "fil0fil.h"
#include "trx0trx.h"

#include <vector>

@@ -180,7 +181,7 @@ dict_stats_update_if_needed(dict_table_t* table)

	if (counter > threshold) {
		/* this will reset table->stat_modified_counter to 0 */
		dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT);
		dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT, NULL);
	}
}
@ -280,10 +281,10 @@ dict_stats_thread_init()
|
|||
1) the background stats gathering thread before any other latch
|
||||
and released without latching anything else in between (thus
|
||||
any level would do here)
|
||||
2) from row_update_statistics_if_needed()
|
||||
2) from dict_stats_update_if_needed()
|
||||
and released without latching anything else in between. We know
|
||||
that dict_sys->mutex (SYNC_DICT) is not acquired when
|
||||
row_update_statistics_if_needed() is called and it may be acquired
|
||||
dict_stats_update_if_needed() is called and it may be acquired
|
||||
inside that function (thus a level <=SYNC_DICT would do).
|
||||
3) from row_drop_table_for_mysql() after dict_sys->mutex (SYNC_DICT)
|
||||
and dict_operation_lock (SYNC_DICT_OPERATION) have been locked
|
||||
|
@ -323,8 +324,7 @@ Get the first table that has been added for auto recalc and eventually
|
|||
update its stats. */
|
||||
static
|
||||
void
|
||||
dict_stats_process_entry_from_recalc_pool()
|
||||
/*=======================================*/
|
||||
dict_stats_process_entry_from_recalc_pool(trx_t* trx)
|
||||
{
|
||||
table_id_t table_id;
|
||||
|
||||
|
@ -378,8 +378,11 @@ dict_stats_process_entry_from_recalc_pool()
|
|||
dict_stats_recalc_pool_add(table);
|
||||
|
||||
} else {
|
||||
|
||||
dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT);
|
||||
++trx->will_lock;
|
||||
dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT, trx);
|
||||
if (trx->state != TRX_STATE_NOT_STARTED) {
|
||||
trx_commit_for_mysql(trx);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
@ -440,6 +443,9 @@ DECLARE_THREAD(dict_stats_thread)(void*)
|
|||
*/
|
||||
#endif /* UNIV_PFS_THREAD */
|
||||
|
||||
trx_t* trx = trx_allocate_for_background();
|
||||
ut_d(trx->persistent_stats = true);
|
||||
|
||||
while (!dict_stats_start_shutdown) {
|
||||
|
||||
/* Wake up periodically even if not signaled. This is
|
||||
|
@ -465,12 +471,14 @@ DECLARE_THREAD(dict_stats_thread)(void*)
|
|||
break;
|
||||
}
|
||||
|
||||
dict_stats_process_entry_from_recalc_pool();
|
||||
dict_defrag_process_entries_from_defrag_pool();
|
||||
dict_stats_process_entry_from_recalc_pool(trx);
|
||||
dict_defrag_process_entries_from_defrag_pool(trx);
|
||||
|
||||
os_event_reset(dict_stats_event);
|
||||
}
|
||||
|
||||
ut_d(trx->persistent_stats = false);
|
||||
trx_free_for_background(trx);
|
||||
srv_dict_stats_thread_active = false;
|
||||
|
||||
os_event_set(dict_stats_shutdown_event);
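In the hunks above, the statistics thread now allocates one background transaction up front, passes it to both the recalc-pool and defrag-pool processing, and commits after each entry, freeing the transaction only at shutdown. A standalone sketch of that worker-loop shape follows; WorkTrx and the queue contents are illustrative only, not InnoDB APIs.

// Standalone sketch of "one long-lived context, committed per queue entry".
#include <deque>
#include <iostream>
#include <queue>
#include <string>

struct WorkTrx {
	bool started = false;
	void start()  { started = true; }                 // like ++trx->will_lock
	void commit() { if (started) { std::cout << "commit\n"; started = false; } }
};

static void process_entry(WorkTrx& trx, const std::string& table)
{
	trx.start();
	std::cout << "recalc persistent stats for " << table << '\n';
}

int main()
{
	WorkTrx trx;                                      // allocated once per thread
	std::deque<std::string> pending{"db/t1", "db/t2"};
	std::queue<std::string> recalc_pool(pending);

	while (!recalc_pool.empty()) {
		process_entry(trx, recalc_pool.front());
		recalc_pool.pop();
		trx.commit();                             // commit per entry, keep the object
	}
	return 0;                                         // the context is released at shutdown
}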
|
||||
|
|
|
@ -6464,7 +6464,21 @@ no_such_table:
|
|||
|
||||
/* No point to init any statistics if tablespace is still encrypted. */
|
||||
if (ib_table->is_readable()) {
|
||||
dict_stats_init(ib_table);
|
||||
trx_t* trx = check_trx_exists(thd);
|
||||
bool alloc = !trx_state_eq(trx, TRX_STATE_NOT_STARTED);
|
||||
|
||||
if (alloc) {
|
||||
trx = trx_allocate_for_background();
|
||||
}
|
||||
ut_ad(!trx->persistent_stats);
|
||||
ut_d(trx->persistent_stats = true);
|
||||
++trx->will_lock;
|
||||
dict_stats_init(ib_table, trx);
|
||||
innobase_commit_low(trx);
|
||||
ut_d(trx->persistent_stats = false);
|
||||
if (alloc) {
|
||||
trx_free_for_background(trx);
|
||||
}
|
||||
} else {
|
||||
ib_table->stat_initialized = 1;
|
||||
}
|
||||
|
@ -13263,7 +13277,9 @@ create_table_info_t::create_table_update_dict()
|
|||
|
||||
innobase_copy_frm_flags_from_create_info(innobase_table, m_create_info);
|
||||
|
||||
dict_stats_update(innobase_table, DICT_STATS_EMPTY_TABLE);
|
||||
++m_trx->will_lock;
|
||||
dict_stats_update(innobase_table, DICT_STATS_EMPTY_TABLE, m_trx);
|
||||
innobase_commit_low(m_trx);
|
||||
|
||||
if (innobase_table) {
|
||||
/* We update the highest file format in the system table
|
||||
|
@ -13529,7 +13545,8 @@ ha_innobase::discard_or_import_tablespace(
|
|||
|
||||
/* Adjust the persistent statistics. */
|
||||
ret = dict_stats_update(dict_table,
|
||||
DICT_STATS_RECALC_PERSISTENT);
|
||||
DICT_STATS_RECALC_PERSISTENT,
|
||||
m_prebuilt->trx);
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
push_warning_printf(
|
||||
|
@ -13537,8 +13554,11 @@ ha_innobase::discard_or_import_tablespace(
|
|||
Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_ALTER_INFO,
|
||||
"Error updating stats for table '%s'"
|
||||
" after table rebuild: %s",
|
||||
" after table import: %s",
|
||||
dict_table->name.m_name, ut_strerr(ret));
|
||||
trx_rollback_to_savepoint(m_prebuilt->trx, NULL);
|
||||
} else {
|
||||
trx_commit_for_mysql(m_prebuilt->trx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -14017,8 +14037,6 @@ ha_innobase::rename_table(
|
|||
|
||||
innobase_commit_low(trx);
|
||||
|
||||
trx_free_for_mysql(trx);
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
char norm_from[MAX_FULL_NAME_LEN];
|
||||
char norm_to[MAX_FULL_NAME_LEN];
|
||||
|
@ -14028,17 +14046,23 @@ ha_innobase::rename_table(
|
|||
normalize_table_name(norm_from, from);
|
||||
normalize_table_name(norm_to, to);
|
||||
|
||||
++trx->will_lock;
|
||||
ret = dict_stats_rename_table(norm_from, norm_to,
|
||||
errstr, sizeof(errstr));
|
||||
errstr, sizeof errstr, trx);
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
trx_rollback_to_savepoint(trx, NULL);
|
||||
ib::error() << errstr;
|
||||
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_LOCK_WAIT_TIMEOUT, errstr);
|
||||
} else {
|
||||
innobase_commit_low(trx);
|
||||
}
|
||||
}
|
||||
|
||||
trx_free_for_mysql(trx);
|
||||
|
||||
/* Add a special case to handle the Duplicated Key error
|
||||
and return DB_ERROR instead.
|
||||
This is to avoid a possible SIGSEGV error from mysql error
|
||||
|
@ -14575,17 +14599,33 @@ ha_innobase::info_low(
|
|||
}
|
||||
|
||||
ut_ad(!mutex_own(&dict_sys->mutex));
|
||||
ret = dict_stats_update(ib_table, opt);
|
||||
/* Do not use prebuilt->trx in case this is
|
||||
called in the middle of a transaction. We
|
||||
should commit the transaction after
|
||||
dict_stats_update() in order not to hog locks
|
||||
on the mysql.innodb_table_stats,
|
||||
mysql.innodb_index_stats tables. */
|
||||
trx_t* trx = trx_allocate_for_background();
|
||||
ut_d(trx->persistent_stats = true);
|
||||
++trx->will_lock;
|
||||
ret = dict_stats_update(ib_table, opt, trx);
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
m_prebuilt->trx->op_info = "";
|
||||
DBUG_RETURN(HA_ERR_GENERIC);
|
||||
trx_rollback_to_savepoint(trx, NULL);
|
||||
} else {
|
||||
m_prebuilt->trx->op_info =
|
||||
"returning various info to MySQL";
|
||||
trx_commit_for_mysql(trx);
|
||||
}
|
||||
|
||||
m_prebuilt->trx->op_info =
|
||||
"returning various info to MariaDB";
|
||||
}
|
||||
ut_d(trx->persistent_stats = false);
|
||||
trx_free_for_background(trx);
|
||||
|
||||
if (ret != DB_SUCCESS) {
|
||||
DBUG_RETURN(HA_ERR_GENERIC);
|
||||
}
|
||||
}
|
||||
|
||||
stats.update_time = (ulong) ib_table->update_time;
|
||||
}
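The info_low() change above (like the ha_innobase::open() hunk earlier) runs dict_stats_update() under a transaction dedicated to the statistics tables, committed on success and rolled back on error, so locks on mysql.innodb_table_stats and mysql.innodb_index_stats are not tied to the user transaction. A minimal standalone sketch of that commit-or-rollback scope, assuming a toy Trx type rather than trx_t:

// Sketch of a commit-on-success / rollback-on-failure scope for a dedicated
// statistics transaction. Trx is a toy type, not trx_t.
#include <iostream>

struct Trx {
	void commit()   { std::cout << "commit\n"; }
	void rollback() { std::cout << "rollback\n"; }
};

class StatsTrxScope {
public:
	explicit StatsTrxScope(Trx& trx) : m_trx(trx) {}

	void finish(bool ok)
	{
		if (ok) {
			m_trx.commit();
		} else {
			m_trx.rollback();
		}
		m_finished = true;
	}

	~StatsTrxScope()
	{
		if (!m_finished) {
			m_trx.rollback();   // never leave stats-table locks behind
		}
	}

private:
	Trx&	m_trx;
	bool	m_finished = false;
};

int main()
{
	Trx trx;                            // would be a background transaction
	StatsTrxScope scope(trx);
	bool stats_updated_ok = true;       // result of the hypothetical stats update
	scope.finish(stats_updated_ok);     // commit here, rollback on error
}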
|
||||
|
|
|
@ -8213,29 +8213,27 @@ commit_cache_norebuild(
|
|||
/** Adjust the persistent statistics after non-rebuilding ALTER TABLE.
|
||||
Remove statistics for dropped indexes, add statistics for created indexes
|
||||
and rename statistics for renamed indexes.
|
||||
@param ha_alter_info Data used during in-place alter
|
||||
@param ctx In-place ALTER TABLE context
|
||||
@param altered_table MySQL table that is being altered
|
||||
@param table_name Table name in MySQL
|
||||
@param thd MySQL connection
|
||||
*/
|
||||
@param ha_alter_info Data used during in-place alter
|
||||
@param ctx In-place ALTER TABLE context
|
||||
@param table_name Table name in MySQL
|
||||
@param trx transaction
|
||||
@return error code */
|
||||
static
|
||||
void
|
||||
dberr_t
|
||||
alter_stats_norebuild(
|
||||
/*==================*/
|
||||
Alter_inplace_info* ha_alter_info,
|
||||
ha_innobase_inplace_ctx* ctx,
|
||||
TABLE* altered_table,
|
||||
const char* table_name,
|
||||
THD* thd)
|
||||
trx_t* trx)
|
||||
{
|
||||
dberr_t err = DB_SUCCESS;
|
||||
ulint i;
|
||||
|
||||
DBUG_ENTER("alter_stats_norebuild");
|
||||
DBUG_ASSERT(!ctx->need_rebuild());
|
||||
|
||||
if (!dict_stats_is_persistent_enabled(ctx->new_table)) {
|
||||
DBUG_VOID_RETURN;
|
||||
DBUG_RETURN(err);
|
||||
}
|
||||
|
||||
/* Delete corresponding rows from the stats table. We do this
|
||||
|
@ -8264,10 +8262,13 @@ alter_stats_norebuild(
|
|||
|
||||
char errstr[1024];
|
||||
|
||||
if (dict_stats_drop_index(
|
||||
ctx->new_table->name.m_name, key->name,
|
||||
errstr, sizeof errstr) != DB_SUCCESS) {
|
||||
push_warning(thd,
|
||||
dberr_t err2 = dict_stats_drop_index(
|
||||
ctx->new_table->name.m_name, key->name,
|
||||
errstr, sizeof errstr, trx);
|
||||
|
||||
if (err2 != DB_SUCCESS) {
|
||||
err = err2;
|
||||
push_warning(trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_LOCK_WAIT_TIMEOUT, errstr);
|
||||
}
|
||||
|
@ -8303,34 +8304,36 @@ alter_stats_norebuild(
|
|||
DBUG_ASSERT(index->table == ctx->new_table);
|
||||
|
||||
if (!(index->type & DICT_FTS)) {
|
||||
dict_stats_init(ctx->new_table);
|
||||
dict_stats_update_for_index(index);
|
||||
dict_stats_init(ctx->new_table, trx);
|
||||
dberr_t err2 = dict_stats_update_for_index(index, trx);
|
||||
if (err2 != DB_SUCCESS) {
|
||||
err = err2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DBUG_VOID_RETURN;
|
||||
DBUG_RETURN(err);
|
||||
}
|
||||
|
||||
/** Adjust the persistent statistics after rebuilding ALTER TABLE.
|
||||
Remove statistics for dropped indexes, add statistics for created indexes
|
||||
and rename statistics for renamed indexes.
|
||||
@param table InnoDB table that was rebuilt by ALTER TABLE
|
||||
@param table_name Table name in MySQL
|
||||
@param thd MySQL connection
|
||||
*/
|
||||
@param table InnoDB table that was rebuilt by ALTER TABLE
|
||||
@param table_name Table name in MySQL
|
||||
@param trx transaction
|
||||
@return error code */
|
||||
static
|
||||
void
|
||||
dberr_t
|
||||
alter_stats_rebuild(
|
||||
/*================*/
|
||||
dict_table_t* table,
|
||||
const char* table_name,
|
||||
THD* thd)
|
||||
trx_t* trx)
|
||||
{
|
||||
DBUG_ENTER("alter_stats_rebuild");
|
||||
|
||||
if (dict_table_is_discarded(table)
|
||||
|| !dict_stats_is_persistent_enabled(table)) {
|
||||
DBUG_VOID_RETURN;
|
||||
DBUG_RETURN(DB_SUCCESS);
|
||||
}
|
||||
|
||||
#ifndef DBUG_OFF
|
||||
|
@ -8343,7 +8346,24 @@ alter_stats_rebuild(
|
|||
table->file_unreadable = true;
|
||||
);
|
||||
|
||||
dberr_t ret = dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT);
|
||||
char errstr[1024];
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
dberr_t ret = dict_stats_drop_table(table->name.m_name,
|
||||
errstr, sizeof errstr, trx);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
if (ret != DB_SUCCESS) {
|
||||
push_warning_printf(
|
||||
trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_ALTER_INFO,
|
||||
"Deleting persistent statistics"
|
||||
" for rebuilt table '%s' in"
|
||||
" InnoDB failed: %s",
|
||||
table_name, errstr);
|
||||
DBUG_RETURN(ret);
|
||||
}
|
||||
|
||||
ret = dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT, trx);
|
||||
|
||||
DBUG_EXECUTE_IF(
|
||||
"ib_rename_index_fail2",
|
||||
|
@ -8352,7 +8372,7 @@ alter_stats_rebuild(
|
|||
|
||||
if (ret != DB_SUCCESS) {
|
||||
push_warning_printf(
|
||||
thd,
|
||||
trx->mysql_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_ALTER_INFO,
|
||||
"Error updating stats for table '%s'"
|
||||
|
@ -8360,7 +8380,7 @@ alter_stats_rebuild(
|
|||
table_name, ut_strerr(ret));
|
||||
}
|
||||
|
||||
DBUG_VOID_RETURN;
|
||||
DBUG_RETURN(ret);
|
||||
}
|
||||
|
||||
#ifndef DBUG_OFF
|
||||
|
@ -8667,6 +8687,36 @@ ha_innobase::commit_inplace_alter_table(
|
|||
if (fail) {
|
||||
trx_rollback_for_mysql(trx);
|
||||
} else if (!new_clustered) {
|
||||
if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) {
|
||||
DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1);
|
||||
bool warned = false;
|
||||
|
||||
for (inplace_alter_handler_ctx** pctx = ctx_array;
|
||||
*pctx; pctx++) {
|
||||
ha_innobase_inplace_ctx* ctx
|
||||
= static_cast<ha_innobase_inplace_ctx*>
|
||||
(*pctx);
|
||||
|
||||
DBUG_ASSERT(!ctx->need_rebuild());
|
||||
char errstr[1024];
|
||||
if (dict_stats_drop_table(
|
||||
ctx->old_table->name.m_name,
|
||||
errstr, sizeof errstr, trx)
|
||||
!= DB_SUCCESS && !warned) {
|
||||
warned = true;
|
||||
push_warning_printf(
|
||||
m_user_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_ALTER_INFO,
|
||||
"Deleting persistent "
|
||||
"statistics for table '%s' in"
|
||||
" InnoDB failed: %s",
|
||||
table_share->table_name.str,
|
||||
errstr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trx_commit_for_mysql(trx);
|
||||
} else {
|
||||
mtr_t mtr;
|
||||
|
@ -8923,23 +8973,6 @@ foreign_fail:
|
|||
m_prebuilt->table = dict_table_open_on_name(
|
||||
tb_name, TRUE, TRUE, DICT_ERR_IGNORE_NONE);
|
||||
|
||||
/* Drop outdated table stats. */
|
||||
char errstr[1024];
|
||||
if (dict_stats_drop_table(
|
||||
m_prebuilt->table->name.m_name,
|
||||
errstr, sizeof(errstr))
|
||||
!= DB_SUCCESS) {
|
||||
push_warning_printf(
|
||||
m_user_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_ALTER_INFO,
|
||||
"Deleting persistent statistics"
|
||||
" for table '%s' in"
|
||||
" InnoDB failed: %s",
|
||||
table->s->table_name.str,
|
||||
errstr);
|
||||
}
|
||||
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
trx_free_for_mysql(trx);
|
||||
MONITOR_ATOMIC_DEC(MONITOR_PENDING_ALTER_TABLE);
|
||||
|
@ -8995,41 +9028,6 @@ foreign_fail:
|
|||
}
|
||||
#endif
|
||||
if (new_clustered) {
|
||||
/* Since the table has been rebuilt, we remove
|
||||
all persistent statistics corresponding to the
|
||||
old copy of the table (which was renamed to
|
||||
ctx->tmp_name). */
|
||||
|
||||
char errstr[1024];
|
||||
|
||||
DBUG_ASSERT(0 == strcmp(ctx->old_table->name.m_name,
|
||||
ctx->tmp_name));
|
||||
|
||||
DBUG_EXECUTE_IF(
|
||||
"ib_rename_index_fail3",
|
||||
DBUG_SET("+d,innodb_report_deadlock");
|
||||
);
|
||||
|
||||
if (dict_stats_drop_table(
|
||||
ctx->new_table->name.m_name,
|
||||
errstr, sizeof(errstr))
|
||||
!= DB_SUCCESS) {
|
||||
push_warning_printf(
|
||||
m_user_thd,
|
||||
Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_ALTER_INFO,
|
||||
"Deleting persistent statistics"
|
||||
" for rebuilt table '%s' in"
|
||||
" InnoDB failed: %s",
|
||||
table->s->table_name.str,
|
||||
errstr);
|
||||
}
|
||||
|
||||
DBUG_EXECUTE_IF(
|
||||
"ib_rename_index_fail3",
|
||||
DBUG_SET("-d,innodb_report_deadlock");
|
||||
);
|
||||
|
||||
DBUG_EXECUTE_IF("ib_ddl_crash_before_commit",
|
||||
DBUG_SUICIDE(););
|
||||
|
||||
|
@ -9075,11 +9073,13 @@ foreign_fail:
|
|||
}
|
||||
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
trx_free_for_mysql(trx);
|
||||
++trx->will_lock;
|
||||
|
||||
/* TODO: The following code could be executed
|
||||
while allowing concurrent access to the table
|
||||
(MDL downgrade). */
|
||||
trx->mysql_thd = m_user_thd;
|
||||
dberr_t stats_err = DB_SUCCESS;
|
||||
|
||||
if (new_clustered) {
|
||||
for (inplace_alter_handler_ctx** pctx = ctx_array;
|
||||
|
@ -9088,10 +9088,11 @@ foreign_fail:
|
|||
= static_cast<ha_innobase_inplace_ctx*>
|
||||
(*pctx);
|
||||
DBUG_ASSERT(ctx->need_rebuild());
|
||||
|
||||
alter_stats_rebuild(
|
||||
ctx->new_table, table->s->table_name.str,
|
||||
m_user_thd);
|
||||
stats_err = alter_stats_rebuild(
|
||||
ctx->new_table, table->s->table_name.str, trx);
|
||||
if (stats_err != DB_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
DBUG_INJECT_CRASH("ib_commit_inplace_crash",
|
||||
crash_inject_count++);
|
||||
}
|
||||
|
@ -9103,14 +9104,25 @@ foreign_fail:
|
|||
(*pctx);
|
||||
DBUG_ASSERT(!ctx->need_rebuild());
|
||||
|
||||
alter_stats_norebuild(
|
||||
ha_alter_info, ctx, altered_table,
|
||||
table->s->table_name.str, m_user_thd);
|
||||
stats_err = alter_stats_norebuild(
|
||||
ha_alter_info, ctx,
|
||||
table->s->table_name.str, trx);
|
||||
if (stats_err != DB_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
DBUG_INJECT_CRASH("ib_commit_inplace_crash",
|
||||
crash_inject_count++);
|
||||
}
|
||||
}
|
||||
|
||||
if (stats_err != DB_SUCCESS) {
|
||||
trx_rollback_to_savepoint(trx, NULL);
|
||||
} else {
|
||||
trx_commit_for_mysql(trx);
|
||||
}
|
||||
|
||||
trx_free_for_mysql(trx);
|
||||
|
||||
innobase_parse_hint_from_comment(
|
||||
m_user_thd, m_prebuilt->table, altered_table->s);
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2016, MariaDB Corporation. All rights Reserved.
|
||||
Copyright (c) 2016, 2017, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
@ -66,28 +66,24 @@ dict_stats_defrag_pool_del(
|
|||
all entries for the table */
|
||||
const dict_index_t* index); /*!< in: index to remove */
|
||||
|
||||
/*****************************************************************//**
|
||||
Get the first index that has been added for updating persistent defrag
|
||||
stats and eventually save its stats. */
|
||||
/** Process indexes that have been scheduled for defragmenting.
|
||||
@param[in,out] trx transaction that will be started and committed */
|
||||
void
|
||||
dict_defrag_process_entries_from_defrag_pool();
|
||||
/*===========================================*/
|
||||
dict_defrag_process_entries_from_defrag_pool(trx_t* trx);
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation result.
|
||||
/** Save defragmentation result.
|
||||
@param[in] index index that was defragmented
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_defrag_summary(
|
||||
/*============================*/
|
||||
dict_index_t* index) /*!< in: index */
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
dict_stats_save_defrag_summary(dict_index_t* index, trx_t* trx)
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation stats for a given index.
|
||||
/** Save defragmentation stats for a given index.
|
||||
@param[in] index index that is being defragmented
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_defrag_stats(
|
||||
/*============================*/
|
||||
dict_index_t* index) /*!< in: index */
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
dict_stats_save_defrag_stats(dict_index_t* index, trx_t* trx)
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
#endif /* dict0defrag_bg_h */
|
||||
|
|
|
@@ -1583,7 +1583,7 @@ struct dict_table_t {
	/** How many rows are modified since last stats recalc. When a row is
	inserted, updated, or deleted, we add 1 to this number; we calculate
	new estimates for the table and the indexes if the table has changed
	too much, see row_update_statistics_if_needed(). The counter is reset
	too much, see dict_stats_update_if_needed(). The counter is reset
	to zero at statistics calculation. This counter is not protected by
	any latch, because this is only used for heuristics. */
	ib_uint64_t	stat_modified_counter;

@@ -1842,6 +1842,19 @@ dict_col_get_spatial_status(
	return(spatial_status);
}

/** Clear defragmentation summary. */
inline void dict_stats_empty_defrag_summary(dict_index_t* index)
{
	index->stat_defrag_n_pages_freed = 0;
}

/** Clear defragmentation related index stats. */
inline void dict_stats_empty_defrag_stats(dict_index_t* index)
{
	index->stat_defrag_modified_counter = 0;
	index->stat_defrag_n_page_split = 0;
}

#include "dict0mem.ic"

#endif /* dict0mem_h */
@ -92,13 +92,12 @@ bool
|
|||
dict_stats_auto_recalc_is_enabled(const dict_table_t* table)
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/*********************************************************************//**
|
||||
Initialize table's stats for the first time when opening a table. */
|
||||
/** Initialize table statistics for the first time when opening a table.
|
||||
@param[in,out] table freshly opened table
|
||||
@param[in,out] trx transaction */
|
||||
UNIV_INLINE
|
||||
void
|
||||
dict_stats_init(
|
||||
/*============*/
|
||||
dict_table_t* table); /*!< in/out: table */
|
||||
dict_stats_init(dict_table_t* table, trx_t* trx);
|
||||
|
||||
/*********************************************************************//**
|
||||
Deinitialize table's stats after the last close of the table. This is
|
||||
|
@ -117,66 +116,68 @@ void
|
|||
dict_stats_update_if_needed(dict_table_t* table)
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
|
||||
/*********************************************************************//**
|
||||
Calculates new estimates for table and index statistics. The statistics
|
||||
are used in query optimization.
|
||||
/** Calculate new estimates for table and index statistics.
|
||||
@param[in,out] table table
|
||||
@param[in] stats_upd_option how to update statistics
|
||||
@param[in,out] trx transaction
|
||||
@return DB_* error code or DB_SUCCESS */
|
||||
dberr_t
|
||||
dict_stats_update(
|
||||
/*==============*/
|
||||
dict_table_t* table, /*!< in/out: table */
|
||||
dict_stats_upd_option_t stats_upd_option);
|
||||
/*!< in: whether to (re) calc
|
||||
the stats or to fetch them from
|
||||
the persistent storage */
|
||||
dict_table_t* table,
|
||||
dict_stats_upd_option_t stats_upd_option,
|
||||
trx_t* trx);
|
||||
|
||||
/*********************************************************************//**
|
||||
Removes the information for a particular index's stats from the persistent
|
||||
storage if it exists and if there is data stored for this index.
|
||||
This function creates its own trx and commits it.
|
||||
/** Remove the persistent statistics for an index.
|
||||
@param[in] db_and_table schema and table name, e.g., 'db/table'
|
||||
@param[in] iname index name
|
||||
@param[out] errstr error message (when not returning DB_SUCCESS)
|
||||
@param[in] errstr_sz sizeof errstr
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_drop_index(
|
||||
/*==================*/
|
||||
const char* tname, /*!< in: table name */
|
||||
const char* iname, /*!< in: index name */
|
||||
char* errstr, /*!< out: error message if != DB_SUCCESS
|
||||
is returned */
|
||||
ulint errstr_sz);/*!< in: size of the errstr buffer */
|
||||
const char* db_and_table,
|
||||
const char* iname,
|
||||
char* errstr,
|
||||
size_t errstr_sz,
|
||||
trx_t* trx);
|
||||
|
||||
/*********************************************************************//**
|
||||
Removes the statistics for a table and all of its indexes from the
|
||||
persistent storage if it exists and if there is data stored for the table.
|
||||
This function creates its own transaction and commits it.
|
||||
/** Remove the persistent statistics for a table and all of its indexes.
|
||||
@param[in] db_and_table schema and table name, e.g., 'db/table'
|
||||
@param[out] errstr error message (when not returning DB_SUCCESS)
|
||||
@param[in] errstr_sz sizeof errstr
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_drop_table(
|
||||
/*==================*/
|
||||
const char* table_name, /*!< in: table name */
|
||||
char* errstr, /*!< out: error message
|
||||
if != DB_SUCCESS is returned */
|
||||
ulint errstr_sz); /*!< in: size of errstr buffer */
|
||||
const char* db_and_table,
|
||||
char* errstr,
|
||||
size_t errstr_sz,
|
||||
trx_t* trx);
|
||||
|
||||
/*********************************************************************//**
|
||||
Fetches or calculates new estimates for index statistics. */
|
||||
void
|
||||
dict_stats_update_for_index(
|
||||
/*========================*/
|
||||
dict_index_t* index) /*!< in/out: index */
|
||||
/** Calculate index statistics.
|
||||
@param[in,out] index index tree
|
||||
@param[in,out] trx transaction (for persistent statistics)
|
||||
@return DB_SUCCESS or error code */
|
||||
UNIV_INTERN
|
||||
dberr_t
|
||||
dict_stats_update_for_index(dict_index_t* index, trx_t* trx)
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
|
||||
/*********************************************************************//**
|
||||
Renames a table in InnoDB persistent stats storage.
|
||||
This function creates its own transaction and commits it.
|
||||
/** Rename a table in the InnoDB persistent statistics storage.
|
||||
@param[in] old_name old schema and table name, e.g., 'db/table'
|
||||
@param[in] new_name new schema and table name, e.g., 'db/table'
|
||||
@param[out] errstr error message (when not returning DB_SUCCESS)
|
||||
@param[in] errstr_sz sizeof errstr
|
||||
@param[in,out] trx transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_rename_table(
|
||||
/*====================*/
|
||||
const char* old_name, /*!< in: old table name */
|
||||
const char* new_name, /*!< in: new table name */
|
||||
char* errstr, /*!< out: error string if != DB_SUCCESS
|
||||
is returned */
|
||||
size_t errstr_sz); /*!< in: errstr size */
|
||||
const char* old_name,
|
||||
const char* new_name,
|
||||
char* errstr,
|
||||
size_t errstr_sz,
|
||||
trx_t* trx);
|
||||
/*********************************************************************//**
|
||||
Renames an index in InnoDB persistent stats storage.
|
||||
This function creates its own transaction and commits it.
|
||||
|
@ -190,52 +191,13 @@ dict_stats_rename_index(
|
|||
const char* old_index_name, /*!< in: old index name */
|
||||
const char* new_index_name) /*!< in: new index name */
|
||||
__attribute__((warn_unused_result));
|
||||
/*********************************************************************//**
|
||||
Save defragmentation result.
|
||||
@return DB_SUCCESS or error code */
|
||||
UNIV_INTERN
|
||||
dberr_t
|
||||
dict_stats_save_defrag_summary(
|
||||
dict_index_t* index); /*!< in: index */
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation stats for a given index.
|
||||
@return DB_SUCCESS or error code */
|
||||
UNIV_INTERN
|
||||
dberr_t
|
||||
dict_stats_save_defrag_stats(
|
||||
dict_index_t* index); /*!< in: index */
|
||||
|
||||
/**********************************************************************//**
|
||||
Clear defragmentation summary. */
|
||||
UNIV_INTERN
|
||||
/** Reset the table and index statistics, corresponding to an empty table.
|
||||
@param[in,out] table table whose statistics are to be reset
|
||||
@param[in] empty_defrag_stats whether to empty the defrag statistics
|
||||
*/
|
||||
void
|
||||
dict_stats_empty_defrag_summary(
|
||||
/*==================*/
|
||||
dict_index_t* index); /*!< in: index to clear defragmentation stats */
|
||||
|
||||
/**********************************************************************//**
|
||||
Clear defragmentation related index stats. */
|
||||
UNIV_INTERN
|
||||
void
|
||||
dict_stats_empty_defrag_stats(
|
||||
/*==================*/
|
||||
dict_index_t* index); /*!< in: index to clear defragmentation stats */
|
||||
|
||||
|
||||
/*********************************************************************//**
|
||||
Renames an index in InnoDB persistent stats storage.
|
||||
This function creates its own transaction and commits it.
|
||||
@return DB_SUCCESS or error code. DB_STATS_DO_NOT_EXIST will be returned
|
||||
if the persistent stats do not exist. */
|
||||
dberr_t
|
||||
dict_stats_rename_index(
|
||||
/*====================*/
|
||||
const dict_table_t* table, /*!< in: table whose index
|
||||
is renamed */
|
||||
const char* old_index_name, /*!< in: old index name */
|
||||
const char* new_index_name) /*!< in: new index name */
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
dict_stats_empty_table(dict_table_t* table, bool empty_defrag_stats = true);
|
||||
|
||||
/** Save an individual index's statistic into the persistent statistics
|
||||
storage.
|
||||
|
@ -245,9 +207,7 @@ storage.
|
|||
@param[in] stat_value value of the stat
|
||||
@param[in] sample_size n pages sampled or NULL
|
||||
@param[in] stat_description description of the stat
|
||||
@param[in,out] trx in case of NULL the function will
|
||||
allocate and free the trx object. If it is not NULL then it will be
|
||||
rolled back only in the case of error, but not freed.
|
||||
@param[in,out] trx dictionary transaction
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_index_stat(
|
||||
|
@ -257,7 +217,8 @@ dict_stats_save_index_stat(
|
|||
ib_uint64_t stat_value,
|
||||
ib_uint64_t* sample_size,
|
||||
const char* stat_description,
|
||||
trx_t* trx);
|
||||
trx_t* trx)
|
||||
MY_ATTRIBUTE((nonnull(1,3,7), warn_unused_result));
|
||||
|
||||
/** Report an error if updating table statistics failed because
|
||||
.ibd file is missing, table decryption failed or table is corrupted.
|
||||
|
|
|
@ -79,7 +79,7 @@ dict_stats_is_persistent_enabled(const dict_table_t* table)
|
|||
protect the ::stat_persistent with dict_table_stats_lock() like the
|
||||
other ::stat_ members which would be too big performance penalty,
|
||||
especially when this function is called from
|
||||
row_update_statistics_if_needed(). */
|
||||
dict_stats_update_if_needed(). */
|
||||
|
||||
/* we rely on this read to be atomic */
|
||||
ib_uint32_t stat_persistent = table->stat_persistent;
|
||||
|
@ -141,13 +141,12 @@ dict_stats_auto_recalc_is_enabled(const dict_table_t* table)
|
|||
}
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Initialize table's stats for the first time when opening a table. */
|
||||
/** Initialize table statistics for the first time when opening a table.
|
||||
@param[in,out] table freshly opened table
|
||||
@param[in,out] trx transaction */
|
||||
UNIV_INLINE
|
||||
void
|
||||
dict_stats_init(
|
||||
/*============*/
|
||||
dict_table_t* table) /*!< in/out: table */
|
||||
dict_stats_init(dict_table_t* table, trx_t* trx)
|
||||
{
|
||||
ut_ad(!mutex_own(&dict_sys->mutex));
|
||||
|
||||
|
@ -163,7 +162,7 @@ dict_stats_init(
|
|||
opt = DICT_STATS_RECALC_TRANSIENT;
|
||||
}
|
||||
|
||||
dict_stats_update(table, opt);
|
||||
dict_stats_update(table, opt, trx);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
|
|
|
@@ -1123,6 +1123,9 @@ struct trx_t {
			mysql_trx_list;	/*!< list of transactions created for
					MySQL; protected by trx_sys->mutex */
#ifdef UNIV_DEBUG
	/** whether this transaction is updating persistent statistics
	(used for silencing a debug assertion at shutdown) */
	bool		persistent_stats;
	bool		in_mysql_trx_list;
					/*!< true if in
					trx_sys->mysql_trx_list */
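The new trx_t::persistent_stats flag above exists only under UNIV_DEBUG; it is set while a transaction touches the statistics tables and is asserted clear again before the transaction is freed (see the trx0trx.cc hunk later in this diff). A tiny standalone illustration of that debug-only marker, using assert() instead of ut_ad() and a made-up FakeTrx type:

// Debug-only "busy with persistent stats" marker, asserted clear at free time.
#include <cassert>

struct FakeTrx {
#ifndef NDEBUG
	bool persistent_stats = false;
#endif
};

static void fake_trx_free(FakeTrx* trx)
{
#ifndef NDEBUG
	assert(!trx->persistent_stats);   // mirrors ut_ad(!trx->persistent_stats)
#endif
	delete trx;
}

int main()
{
	FakeTrx* trx = new FakeTrx();
#ifndef NDEBUG
	trx->persistent_stats = true;     // brackets the statistics update
	trx->persistent_stats = false;
#endif
	fake_trx_free(trx);
}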
@@ -110,7 +110,7 @@ IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
ENDIF()

# Enable InnoDB's UNIV_DEBUG in debug builds
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DUNIV_DEBUG -DUNIV_SYNC_DEBUG")
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DUNIV_DEBUG")

OPTION(WITH_INNODB_AHI "Include innodb_adaptive_hash_index" ON)
OPTION(WITH_INNODB_ROOT_GUESS "Cache index root block descriptors" ON)
@ -1675,18 +1675,7 @@ RecLock::prepare() const
|
|||
ut_error;
|
||||
}
|
||||
|
||||
switch (trx_get_dict_operation(m_trx)) {
|
||||
case TRX_DICT_OP_NONE:
|
||||
break;
|
||||
case TRX_DICT_OP_TABLE:
|
||||
case TRX_DICT_OP_INDEX:
|
||||
ib::error() << "A record lock wait happens in a dictionary"
|
||||
" operation. index " << m_index->name
|
||||
<< " of table " << m_index->table->name
|
||||
<< ". " << BUG_REPORT_MSG;
|
||||
ut_ad(0);
|
||||
}
|
||||
|
||||
ut_ad(trx_get_dict_operation(m_trx) == TRX_DICT_OP_NONE);
|
||||
ut_ad(m_index->table->n_ref_count > 0
|
||||
|| !m_index->table->can_be_evicted);
|
||||
}
|
||||
|
@ -2247,6 +2236,24 @@ RecLock::add_to_waitq(const lock_t* wait_for, const lock_prdt_t* prdt)
|
|||
|
||||
/* Do the preliminary checks, and set query thread state */
|
||||
|
||||
switch (UNIV_EXPECT(trx_get_dict_operation(m_trx), TRX_DICT_OP_NONE)) {
|
||||
case TRX_DICT_OP_NONE:
|
||||
break;
|
||||
case TRX_DICT_OP_TABLE:
|
||||
case TRX_DICT_OP_INDEX:
|
||||
ut_ad(!prdt);
|
||||
|
||||
if (m_trx->dict_operation_lock_mode != RW_X_LATCH) {
|
||||
} else if (!strcmp(m_index->table->name.m_name,
|
||||
"mysql/innodb_table_stats")
|
||||
|| !strcmp(m_index->table->name.m_name,
|
||||
"mysql/innodb_index_stats")) {
|
||||
/* Statistics can be updated as part of a DDL
|
||||
transaction, but only as the very last operation. */
|
||||
return(DB_QUE_THR_SUSPENDED);
|
||||
}
|
||||
}
|
||||
|
||||
prepare();
|
||||
|
||||
bool high_priority = trx_is_high_priority(m_trx);
|
||||
|
@ -4628,6 +4635,16 @@ lock_table_enqueue_waiting(
|
|||
break;
|
||||
case TRX_DICT_OP_TABLE:
|
||||
case TRX_DICT_OP_INDEX:
|
||||
if (trx->dict_operation_lock_mode != RW_X_LATCH) {
|
||||
} else if (!strcmp(table->name.m_name,
|
||||
"mysql/innodb_table_stats")
|
||||
|| !strcmp(table->name.m_name,
|
||||
"mysql/innodb_index_stats")) {
|
||||
/* Statistics can be updated as part of a DDL
|
||||
transaction, but only as the very last operation. */
|
||||
return(DB_QUE_THR_SUSPENDED);
|
||||
}
|
||||
|
||||
ib::error() << "A table lock wait happens in a dictionary"
|
||||
" operation. Table " << table->name
|
||||
<< ". " << BUG_REPORT_MSG;
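Both lock-wait paths above now refuse to enqueue a wait when a dictionary (DDL) transaction that already holds dict_operation_lock in X mode blocks on mysql/innodb_table_stats or mysql/innodb_index_stats, returning DB_QUE_THR_SUSPENDED instead of waiting. A standalone sketch of that decision follows; the function and enum names are illustrative, not InnoDB APIs.

// Standalone sketch of the lock-wait special case for the statistics tables.
#include <cstring>

enum class WaitDecision { ENQUEUE_WAIT, SUSPEND };

// is_dict_op: the transaction is a TRX_DICT_OP_TABLE/INDEX operation
// holds_dict_x_latch: dict_operation_lock is held in RW_X_LATCH mode
WaitDecision table_lock_wait_decision(bool is_dict_op,
                                      bool holds_dict_x_latch,
                                      const char* table_name)
{
	if (is_dict_op
	    && holds_dict_x_latch
	    && (!std::strcmp(table_name, "mysql/innodb_table_stats")
		|| !std::strcmp(table_name, "mysql/innodb_index_stats"))) {
		/* Statistics can be updated as part of a DDL transaction,
		but only as the very last operation; instead of waiting
		(and possibly deadlocking), the request is suspended. */
		return WaitDecision::SUSPEND;
	}

	return WaitDecision::ENQUEUE_WAIT;
}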
|
||||
|
@ -6415,13 +6432,9 @@ loop:
|
|||
|
||||
ut_ad(!trx_is_ac_nl_ro(lock->trx));
|
||||
|
||||
# ifdef UNIV_DEBUG
|
||||
/* Only validate the record queues when this thread is not
|
||||
holding a space->latch. Deadlocks are possible due to
|
||||
latching order violation when UNIV_DEBUG is defined while
|
||||
UNIV_DEBUG is not. */
|
||||
holding a space->latch. */
|
||||
if (!sync_check_find(SYNC_FSP))
|
||||
# endif /* UNIV_DEBUG */
|
||||
for (i = nth_bit; i < lock_rec_get_n_bits(lock); i++) {
|
||||
|
||||
if (i == 1 || lock_rec_get_nth_bit(lock, i)) {
|
||||
|
|
|
@ -3366,31 +3366,9 @@ run_again:
|
|||
que_thr_stop_for_mysql_no_error(thr, trx);
|
||||
} else {
|
||||
que_thr_stop_for_mysql(thr);
|
||||
ut_ad(err != DB_QUE_THR_SUSPENDED);
|
||||
|
||||
if (err != DB_QUE_THR_SUSPENDED) {
|
||||
ibool was_lock_wait;
|
||||
|
||||
was_lock_wait = row_mysql_handle_errors(
|
||||
&err, trx, thr, NULL);
|
||||
|
||||
if (was_lock_wait) {
|
||||
goto run_again;
|
||||
}
|
||||
} else {
|
||||
que_thr_t* run_thr;
|
||||
que_node_t* parent;
|
||||
|
||||
parent = que_node_get_parent(thr);
|
||||
|
||||
run_thr = que_fork_start_command(
|
||||
static_cast<que_fork_t*>(parent));
|
||||
|
||||
ut_a(run_thr == thr);
|
||||
|
||||
/* There was a lock wait but the thread was not
|
||||
in a ready to run or running state. */
|
||||
trx->error_state = DB_LOCK_WAIT;
|
||||
|
||||
if (row_mysql_handle_errors(&err, trx, thr, NULL)) {
|
||||
goto run_again;
|
||||
}
|
||||
}
|
||||
|
@ -3668,7 +3646,6 @@ row_drop_table_for_mysql(
|
|||
|
||||
if (!dict_table_is_temporary(table) && !table->no_rollback()) {
|
||||
|
||||
dict_stats_recalc_pool_del(table);
|
||||
dict_stats_defrag_pool_del(table, NULL);
|
||||
if (btr_defragment_thread_active) {
|
||||
/* During fts_drop_orphaned_tables() in
|
||||
|
@ -3677,17 +3654,6 @@ row_drop_table_for_mysql(
|
|||
initialized by btr_defragment_init(). */
|
||||
btr_defragment_remove_table(table);
|
||||
}
|
||||
|
||||
/* Remove stats for this table and all of its indexes from the
|
||||
persistent storage if it exists and if there are stats for this
|
||||
table in there. This function creates its own trx and commits
|
||||
it. */
|
||||
char errstr[1024];
|
||||
err = dict_stats_drop_table(name, errstr, sizeof(errstr));
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::warn() << errstr;
|
||||
}
|
||||
}
|
||||
|
||||
dict_table_prevent_eviction(table);
|
||||
|
@ -4021,9 +3987,17 @@ row_drop_table_for_mysql(
|
|||
table_flags = table->flags;
|
||||
ut_ad(!dict_table_is_temporary(table));
|
||||
|
||||
err = row_drop_ancillary_fts_tables(table, trx);
|
||||
if (err != DB_SUCCESS) {
|
||||
break;
|
||||
if (!table->no_rollback()) {
|
||||
char errstr[1024];
|
||||
if (dict_stats_drop_table(name, errstr, sizeof errstr,
|
||||
trx) != DB_SUCCESS) {
|
||||
ib::warn() << errstr;
|
||||
}
|
||||
|
||||
err = row_drop_ancillary_fts_tables(table, trx);
|
||||
if (err != DB_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Determine the tablespace filename before we drop
|
||||
|
|
|
@ -1271,10 +1271,6 @@ row_truncate_complete(
|
|||
}
|
||||
}
|
||||
|
||||
if (err == DB_SUCCESS) {
|
||||
dict_stats_update(table, DICT_STATS_EMPTY_TABLE);
|
||||
}
|
||||
|
||||
trx->op_info = "";
|
||||
|
||||
/* For temporary tables or if there was an error, we need to reset
|
||||
|
@ -2107,8 +2103,17 @@ row_truncate_table_for_mysql(
|
|||
dict_table_autoinc_unlock(table);
|
||||
|
||||
if (trx_is_started(trx)) {
|
||||
char errstr[1024];
|
||||
if (dict_stats_drop_table(table->name.m_name, errstr,
|
||||
sizeof errstr, trx) != DB_SUCCESS) {
|
||||
ib::warn() << "Deleting persistent "
|
||||
"statistics for table " << table->name
|
||||
<< " failed: " << errstr;
|
||||
}
|
||||
|
||||
trx_commit_for_mysql(trx);
|
||||
|
||||
dict_stats_empty_table(table);
|
||||
}
|
||||
|
||||
return(row_truncate_complete(table, trx, fsp_flags, logger, err));
|
||||
|
|
|
@ -301,7 +301,8 @@ trx_purge_add_update_undo_to_history(
|
|||
&& purge_sys->state == PURGE_STATE_INIT)
|
||||
|| (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND
|
||||
&& purge_sys->state == PURGE_STATE_DISABLED)
|
||||
|| ((trx->undo_no == 0 || trx->in_mysql_trx_list)
|
||||
|| ((trx->undo_no == 0 || trx->in_mysql_trx_list
|
||||
|| trx->persistent_stats)
|
||||
&& srv_fast_shutdown));
|
||||
|
||||
/* Add the log as the first in the history list */
|
||||
|
|
|
@ -553,6 +553,7 @@ trx_validate_state_before_free(trx_t* trx)
|
|||
ut_ad(!trx->declared_to_be_inside_innodb);
|
||||
ut_ad(!trx->n_mysql_tables_in_use);
|
||||
ut_ad(!trx->mysql_n_tables_locked);
|
||||
ut_ad(!trx->persistent_stats);
|
||||
|
||||
if (trx->declared_to_be_inside_innodb) {
|
||||
|
||||
|
|
|
@ -5698,6 +5698,41 @@ int ha_rocksdb::open(const char *const name, int mode, uint test_if_locked) {
|
|||
|
||||
setup_field_converters();
|
||||
|
||||
/*
|
||||
MariaDB: adjust field->part_of_key for PK columns. We can only do it here
|
||||
because SE API is just relying on the HA_PRIMARY_KEY_IN_READ_INDEX which
|
||||
does not allow to distinguish between unpack'able and non-unpack'able
|
||||
columns.
|
||||
Upstream uses handler->init_with_fields() but we don't have that call.
|
||||
*/
|
||||
{
|
||||
if (!has_hidden_pk(table)) {
|
||||
KEY *const pk_info = &table->key_info[table->s->primary_key];
|
||||
for (uint kp = 0; kp < pk_info->user_defined_key_parts; kp++) {
|
||||
if (!m_pk_descr->can_unpack(kp)) {
|
||||
//
|
||||
uint field_index= pk_info->key_part[kp].field->field_index;
|
||||
table->field[field_index]->part_of_key.clear_all();
|
||||
table->field[field_index]->part_of_key.set_bit(table->s->primary_key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (uint key= 0; key < table->s->keys; key++) {
|
||||
KEY *const key_info = &table->key_info[key];
|
||||
if (key == table->s->primary_key)
|
||||
continue;
|
||||
for (uint kp = 0; kp < key_info->usable_key_parts; kp++) {
|
||||
uint field_index= key_info->key_part[kp].field->field_index;
|
||||
if (m_key_descr_arr[key]->can_unpack(kp)) {
|
||||
table->field[field_index]->part_of_key.set_bit(key);
|
||||
} else {
|
||||
table->field[field_index]->part_of_key.clear_bit(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
|
||||
|
||||
/*
|
||||
|
|
|
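The ha_rocksdb::open() hunk above marks a field as readable from an index only when the key descriptor can unpack that key part, by setting or clearing the corresponding bit in field->part_of_key. A standalone sketch of the same bookkeeping with std::bitset; can_unpack() here is a placeholder predicate, not the storage engine's Rdb_key_def method.

// Sketch of per-key-part coverage bookkeeping with std::bitset.
#include <bitset>
#include <cstddef>
#include <vector>

constexpr std::size_t MAX_KEYS = 64;

struct FieldCoverage {
	std::bitset<MAX_KEYS> part_of_key;   // keys that can serve this field index-only
};

static bool can_unpack(std::size_t key, std::size_t key_part)
{
	return (key + key_part) % 2 == 0;    // placeholder behaviour only
}

// key_parts[key] lists the field number of each key part of that key.
static void adjust_coverage(std::vector<FieldCoverage>& fields,
                            const std::vector<std::vector<std::size_t>>& key_parts)
{
	for (std::size_t key = 0; key < key_parts.size(); key++) {
		for (std::size_t kp = 0; kp < key_parts[key].size(); kp++) {
			const std::size_t field = key_parts[key][kp];
			if (can_unpack(key, kp)) {
				fields[field].part_of_key.set(key);
			} else {
				fields[field].part_of_key.reset(key);
			}
		}
	}
}

int main()
{
	std::vector<FieldCoverage> fields(3);
	adjust_coverage(fields, {{0, 1}, {2}});
	return 0;
}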
@ -0,0 +1,57 @@
|
|||
#
|
||||
# MDEV-14433: RocksDB may show empty or incorrect output with rocksdb_strict_collation_check=off
|
||||
#
|
||||
set global rocksdb_strict_collation_check=off;
|
||||
set @tmp_rscc=@@rocksdb_strict_collation_check;
|
||||
CREATE TABLE t1(
|
||||
a varchar(10) NOT NULL,
|
||||
b char(1) DEFAULT 'X',
|
||||
c char(2) NOT NULL DEFAULT '??',
|
||||
d varchar(10) NOT NULL,
|
||||
e int(11) DEFAULT 0,
|
||||
PRIMARY KEY (a,d),
|
||||
KEY (e)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8;
|
||||
insert into t1 select 1,1,1,1,0;
|
||||
insert into t1 select 2,1,1,1,0;
|
||||
insert into t1 select 3,1,1,1,0;
|
||||
explain
|
||||
select a from t1 force index(e) where e<10000;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range e e 5 NULL # Using index condition
|
||||
select a from t1;
|
||||
a
|
||||
1
|
||||
2
|
||||
3
|
||||
select * from t1;
|
||||
a b c d e
|
||||
1 1 1 1 0
|
||||
2 1 1 1 0
|
||||
3 1 1 1 0
|
||||
DROP TABLE t1;
|
||||
#
|
||||
# MDEV-14563: Wrong query plan for query with no PK
|
||||
#
|
||||
CREATE TABLE t1(
|
||||
pk int primary key,
|
||||
a varchar(10) NOT NULL,
|
||||
e int(11) DEFAULT 0,
|
||||
KEY (a)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8;
|
||||
insert into t1 values (1,1,1),(2,2,2);
|
||||
explain select a from t1 where a <'zzz';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range a a 32 NULL # Using where
|
||||
CREATE TABLE t2(
|
||||
pk int,
|
||||
a varchar(10) NOT NULL,
|
||||
e int(11) DEFAULT 0,
|
||||
KEY (a)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8;
|
||||
insert into t2 values (1,1,1),(2,2,2);
|
||||
explain select a from t2 where a <'zzz';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 range a a 32 NULL # Using where
|
||||
drop table t1,t2;
|
||||
set global rocksdb_strict_collation_check=@tmp_rscc;
|
storage/rocksdb/mysql-test/rocksdb/t/mariadb_port_fixes.test (new file, 56 lines)
@ -0,0 +1,56 @@
|
|||
--source include/have_rocksdb.inc
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-14433: RocksDB may show empty or incorrect output with rocksdb_strict_collation_check=off
|
||||
--echo #
|
||||
set global rocksdb_strict_collation_check=off;
|
||||
set @tmp_rscc=@@rocksdb_strict_collation_check;
|
||||
|
||||
CREATE TABLE t1(
|
||||
a varchar(10) NOT NULL,
|
||||
b char(1) DEFAULT 'X',
|
||||
c char(2) NOT NULL DEFAULT '??',
|
||||
d varchar(10) NOT NULL,
|
||||
e int(11) DEFAULT 0,
|
||||
PRIMARY KEY (a,d),
|
||||
KEY (e)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8;
|
||||
|
||||
insert into t1 select 1,1,1,1,0;
|
||||
insert into t1 select 2,1,1,1,0;
|
||||
insert into t1 select 3,1,1,1,0;
|
||||
|
||||
--replace_column 9 #
|
||||
explain
|
||||
select a from t1 force index(e) where e<10000;
|
||||
select a from t1;
|
||||
select * from t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-14563: Wrong query plan for query with no PK
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1(
|
||||
pk int primary key,
|
||||
a varchar(10) NOT NULL,
|
||||
e int(11) DEFAULT 0,
|
||||
KEY (a)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8;
|
||||
insert into t1 values (1,1,1),(2,2,2);
|
||||
--replace_column 9 #
|
||||
explain select a from t1 where a <'zzz';
|
||||
|
||||
CREATE TABLE t2(
|
||||
pk int,
|
||||
a varchar(10) NOT NULL,
|
||||
e int(11) DEFAULT 0,
|
||||
KEY (a)
|
||||
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8;
|
||||
insert into t2 values (1,1,1),(2,2,2);
|
||||
--replace_column 9 #
|
||||
explain select a from t2 where a <'zzz';
|
||||
|
||||
drop table t1,t2;
|
||||
|
||||
set global rocksdb_strict_collation_check=@tmp_rscc;
|
|
@ -479,7 +479,6 @@ stats and eventually save its stats. */
|
|||
static
|
||||
void
|
||||
dict_stats_process_entry_from_defrag_pool()
|
||||
/*=======================================*/
|
||||
{
|
||||
table_id_t table_id;
|
||||
index_id_t index_id;
|
||||
|
@ -501,31 +500,18 @@ dict_stats_process_entry_from_defrag_pool()
|
|||
table = dict_table_open_on_id(table_id, TRUE,
|
||||
DICT_TABLE_OP_OPEN_ONLY_IF_CACHED);
|
||||
|
||||
if (table == NULL) {
|
||||
dict_index_t* index = table && !table->corrupted
|
||||
? dict_table_find_index_on_id(table, index_id)
|
||||
: NULL;
|
||||
|
||||
if (!index || dict_index_is_corrupted(index)) {
|
||||
if (table) {
|
||||
dict_table_close(table, TRUE, FALSE);
|
||||
}
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check whether table is corrupted */
|
||||
if (table->corrupted) {
|
||||
dict_table_close(table, TRUE, FALSE);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
return;
|
||||
}
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
dict_index_t* index = dict_table_find_index_on_id(table, index_id);
|
||||
|
||||
if (index == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check whether index is corrupted */
|
||||
if (dict_index_is_corrupted(index)) {
|
||||
dict_table_close(table, FALSE, FALSE);
|
||||
return;
|
||||
}
|
||||
|
||||
dict_stats_save_defrag_stats(index);
|
||||
dict_table_close(table, FALSE, FALSE);
|
||||
}
|
||||
|