Mirror of https://github.com/MariaDB/server.git

Merge 10.2 into 10.3

Commit 93b6552182
30 changed files with 508 additions and 190 deletions
@@ -7239,6 +7239,22 @@ a
5
SET @@optimizer_switch= @optimiser_switch_save;
DROP TABLE t1, t2, t3;
#
# MDEV-16820: impossible where with inexpensive subquery
#
create table t1 (a int) engine=myisam;
insert into t1 values (3), (1), (7);
create table t2 (b int, index idx(b));
insert into t2 values (2), (5), (3), (2);
explain select * from t1 where (select max(b) from t2) = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
explain select * from t1 where (select max(b) from t2) = 10 and t1.a > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
drop table t1,t2;
End of 5.5 tests
# End of 10.0 tests
#
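Reading the expected output above with the data in mind makes the optimization clearer: the subquery is cheap (a MAX over an indexed column), so the optimizer evaluates it while planning, sees that max(b) is 5, and folds the comparison with 10 into a constant false, reported as Impossible WHERE. A minimal sketch of the same check, assuming an empty test schema (table names follow the test above):

create table t1 (a int) engine=myisam;
insert into t1 values (3), (1), (7);
create table t2 (b int, index idx(b));
insert into t2 values (2), (5), (3), (2);
# max(b) is 5, so the predicate below is known to be false before t1 is read
select (select max(b) from t2) = 10 as predicate_value;
explain select * from t1 where (select max(b) from t2) = 10;
drop table t1, t2;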
@@ -6104,6 +6104,21 @@ and t1.a in (select `test`.`t3`.`c` from `test`.`t3`);
SET @@optimizer_switch= @optimiser_switch_save;
DROP TABLE t1, t2, t3;

--echo #
--echo # MDEV-16820: impossible where with inexpensive subquery
--echo #

create table t1 (a int) engine=myisam;
insert into t1 values (3), (1), (7);

create table t2 (b int, index idx(b));
insert into t2 values (2), (5), (3), (2);

explain select * from t1 where (select max(b) from t2) = 10;
explain select * from t1 where (select max(b) from t2) = 10 and t1.a > 3;

drop table t1,t2;

--echo End of 5.5 tests
--echo # End of 10.0 tests
@@ -7239,6 +7239,22 @@ a
5
SET @@optimizer_switch= @optimiser_switch_save;
DROP TABLE t1, t2, t3;
#
# MDEV-16820: impossible where with inexpensive subquery
#
create table t1 (a int) engine=myisam;
insert into t1 values (3), (1), (7);
create table t2 (b int, index idx(b));
insert into t2 values (2), (5), (3), (2);
explain select * from t1 where (select max(b) from t2) = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
explain select * from t1 where (select max(b) from t2) = 10 and t1.a > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
drop table t1,t2;
End of 5.5 tests
# End of 10.0 tests
#
@@ -7232,6 +7232,22 @@ a
5
SET @@optimizer_switch= @optimiser_switch_save;
DROP TABLE t1, t2, t3;
#
# MDEV-16820: impossible where with inexpensive subquery
#
create table t1 (a int) engine=myisam;
insert into t1 values (3), (1), (7);
create table t2 (b int, index idx(b));
insert into t2 values (2), (5), (3), (2);
explain select * from t1 where (select max(b) from t2) = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
explain select * from t1 where (select max(b) from t2) = 10 and t1.a > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
drop table t1,t2;
End of 5.5 tests
# End of 10.0 tests
#
@@ -7230,6 +7230,22 @@ a
5
SET @@optimizer_switch= @optimiser_switch_save;
DROP TABLE t1, t2, t3;
#
# MDEV-16820: impossible where with inexpensive subquery
#
create table t1 (a int) engine=myisam;
insert into t1 values (3), (1), (7);
create table t2 (b int, index idx(b));
insert into t2 values (2), (5), (3), (2);
explain select * from t1 where (select max(b) from t2) = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
explain select * from t1 where (select max(b) from t2) = 10 and t1.a > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
drop table t1,t2;
End of 5.5 tests
# End of 10.0 tests
#
@@ -7245,6 +7245,22 @@ a
5
SET @@optimizer_switch= @optimiser_switch_save;
DROP TABLE t1, t2, t3;
#
# MDEV-16820: impossible where with inexpensive subquery
#
create table t1 (a int) engine=myisam;
insert into t1 values (3), (1), (7);
create table t2 (b int, index idx(b));
insert into t2 values (2), (5), (3), (2);
explain select * from t1 where (select max(b) from t2) = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
explain select * from t1 where (select max(b) from t2) = 10 and t1.a > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
drop table t1,t2;
End of 5.5 tests
# End of 10.0 tests
#
@@ -7230,6 +7230,22 @@ a
5
SET @@optimizer_switch= @optimiser_switch_save;
DROP TABLE t1, t2, t3;
#
# MDEV-16820: impossible where with inexpensive subquery
#
create table t1 (a int) engine=myisam;
insert into t1 values (3), (1), (7);
create table t2 (b int, index idx(b));
insert into t2 values (2), (5), (3), (2);
explain select * from t1 where (select max(b) from t2) = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
explain select * from t1 where (select max(b) from t2) = 10 and t1.a > 3;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
drop table t1,t2;
End of 5.5 tests
# End of 10.0 tests
#
@@ -650,6 +650,19 @@ SELECT * FROM t1;
f1
0
DROP TABLE t1;
create procedure t1_proc()
begin
DECLARE var INT UNSIGNED;
CREATE TEMPORARY TABLE t1(f1 INT UNSIGNED, f2 INT UNSIGNED, KEY( f1, f2 ) )engine=innodb;
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
START TRANSACTION;
INSERT INTO t1 SET f1 = 1, f2 = 1;
UPDATE t1 SET f2 = 2;
SET var = ( SELECT 1 FROM t1 );
DROP TABLE t1;
END//
call t1_proc;
drop procedure t1_proc;
#
# MDEV-15874 CREATE TABLE creates extra transaction
#
@@ -477,6 +477,23 @@ ROLLBACK;
SELECT * FROM t1;
DROP TABLE t1;

delimiter //;
create procedure t1_proc()
begin
DECLARE var INT UNSIGNED;
CREATE TEMPORARY TABLE t1(f1 INT UNSIGNED, f2 INT UNSIGNED, KEY( f1, f2 ) )engine=innodb;
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
START TRANSACTION;
INSERT INTO t1 SET f1 = 1, f2 = 1;
UPDATE t1 SET f2 = 2;
SET var = ( SELECT 1 FROM t1 );
DROP TABLE t1;
END//
delimiter ;//

call t1_proc;
drop procedure t1_proc;

--echo #
--echo # MDEV-15874 CREATE TABLE creates extra transaction
--echo #
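The delimiter //; ... delimiter ;// pair above is what lets the multi-statement procedure body reach the server as a single statement in a mysqltest script. The same pattern in an interactive mysql client uses the DELIMITER client command; a small sketch with a hypothetical procedure name:

DELIMITER //
CREATE PROCEDURE demo_proc()
BEGIN
  DECLARE var INT UNSIGNED;
  SET var = 1;
  SELECT var;
END//
DELIMITER ;
CALL demo_proc();
DROP PROCEDURE demo_proc;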
37 mysql-test/suite/mariabackup/innodb_log_optimize_ddl.result (Normal file)

@@ -0,0 +1,37 @@
SET GLOBAL innodb_log_optimize_ddl=OFF;
CREATE TABLE tz(id BIGINT PRIMARY KEY, i INT)
ENGINE=InnoDB ROW_FORMAT=COMPRESSED;
INSERT INTO tz(id) select * from seq_1_to_10000;
CREATE TABLE tr(id BIGINT PRIMARY KEY, i INT)
ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
INSERT INTO tr(id) select * from seq_1_to_10000;
CREATE TABLE td(id BIGINT PRIMARY KEY, i INT)
ENGINE=InnoDB;
INSERT INTO td(id) select * from seq_1_to_10000;
CREATE PROCEDURE a()
BEGIN
ALTER TABLE tz ADD INDEX(i);
ALTER TABLE tr ADD INDEX(i);
ALTER TABLE td ADD INDEX(i);
END //
call a();
# shutdown server
# remove datadir
# xtrabackup move back
# restart server
DROP PROCEDURE a;
CHECK TABLE tz,tr,td;
Table Op Msg_type Msg_text
test.tz check status OK
test.tr check status OK
test.td check status OK
SELECT COUNT(*) FROM tz;
COUNT(*)
10000
SELECT COUNT(*) FROM tr;
COUNT(*)
10000
SELECT COUNT(*) FROM td;
COUNT(*)
10000
DROP TABLE tz,tr,td;
47 mysql-test/suite/mariabackup/innodb_log_optimize_ddl.test (Normal file)

@@ -0,0 +1,47 @@
# see unsupported_redo.test for the opposite (default) case
--source include/have_innodb.inc
--source include/have_sequence.inc

SET GLOBAL innodb_log_optimize_ddl=OFF;

CREATE TABLE tz(id BIGINT PRIMARY KEY, i INT)
ENGINE=InnoDB ROW_FORMAT=COMPRESSED;
INSERT INTO tz(id) select * from seq_1_to_10000;
CREATE TABLE tr(id BIGINT PRIMARY KEY, i INT)
ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
INSERT INTO tr(id) select * from seq_1_to_10000;
CREATE TABLE td(id BIGINT PRIMARY KEY, i INT)
ENGINE=InnoDB;
INSERT INTO td(id) select * from seq_1_to_10000;

DELIMITER //;
CREATE PROCEDURE a()
BEGIN
ALTER TABLE tz ADD INDEX(i);
ALTER TABLE tr ADD INDEX(i);
ALTER TABLE td ADD INDEX(i);
END //
DELIMITER ;//

let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;

send call a();

--disable_result_log
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
--enable_result_log
exec $XTRABACKUP --prepare --target-dir=$targetdir;

reap;

-- source include/restart_and_restore.inc
--rmdir $targetdir

DROP PROCEDURE a;

CHECK TABLE tz,tr,td;
SELECT COUNT(*) FROM tz;
SELECT COUNT(*) FROM tr;
SELECT COUNT(*) FROM td;

DROP TABLE tz,tr,td;
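The test above drives a mariabackup backup while ALTER TABLE ... ADD INDEX is still running, with innodb_log_optimize_ddl=OFF so that the index builds are redo-logged normally; per the variable's own description, setting it OFF "allows concurrent backup". A quick way to inspect the setting outside the test harness (a sketch; the variable exists only on servers carrying this change):

SELECT @@GLOBAL.innodb_log_optimize_ddl;
SET GLOBAL innodb_log_optimize_ddl = OFF;
SHOW GLOBAL VARIABLES LIKE 'innodb_log_optimize_ddl';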
@@ -21,7 +21,7 @@ connection slave;
sync_with_master;
STOP SLAVE;
connection master;
SET PASSWORD FOR root@"localhost" = PASSWORD('foo');
UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
connection slave;
START SLAVE;
connection master;

@@ -29,7 +29,7 @@ connection master;
# Give slave time to do at last one failed connect retry
# This one must be short so that the slave will not stop retrying
real_sleep 2;
SET PASSWORD FOR root@"localhost" = PASSWORD('');
UPDATE mysql.user SET password=password('') WHERE host='localhost' AND user='root';
# Give slave time to connect (will retry every second)

sleep 2;
@@ -19,11 +19,11 @@ create temporary table tmp select * from mysql.user where host="localhost" and u
connection slave;
STOP SLAVE;
connection master;
SET PASSWORD FOR root@"localhost" = PASSWORD('foo');
UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
connection slave;
START SLAVE;
connection master;
SET PASSWORD FOR root@"localhost" = PASSWORD('');
UPDATE mysql.user SET password=password('') WHERE host='localhost' AND user='root';
CREATE TABLE t3(n INT);
INSERT INTO t3 VALUES(1),(2);
connection slave;
@@ -19,11 +19,11 @@ abandons
connection slave;
stop slave;
connection master;
set password for root@"localhost" = password('foo');
UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
connection slave;
start slave;
connection master;
set password for root@"localhost" = password('');
UPDATE mysql.user SET password=password('') WHERE host='localhost' AND user='root';
create table t3(n int);
insert into t3 values(1),(2);
connection slave;
@@ -19,7 +19,7 @@ select * from t1 limit 10;
sync_slave_with_master;
stop slave;
connection master;
set password for root@"localhost" = password('foo');
UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
connection slave;
start slave;
connection master;

@@ -27,7 +27,7 @@ connection master;
# Give slave time to do at last one failed connect retry
# This one must be short so that the slave will not stop retrying
real_sleep 2;
set password for root@"localhost" = password('');
UPDATE mysql.user SET password=password('') WHERE host='localhost' AND user='root';
# Give slave time to connect (will retry every second)
sleep 2;
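The replication-test hunks above swap between the SET PASSWORD statement and a direct UPDATE of mysql.user for changing the account password (this capture does not preserve which line is removed and which is added). As a general note, not taken from this commit, the two forms differ in one practical detail: a direct catalog update only takes effect once the privilege tables are reloaded. A sketch:

# statement form: takes effect immediately
SET PASSWORD FOR root@'localhost' = PASSWORD('foo');
# direct catalog update, as used in the hunks above; needs an explicit reload
UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
FLUSH PRIVILEGES;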
@@ -1450,6 +1450,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_LOG_OPTIMIZE_DDL
SESSION_VALUE NULL
GLOBAL_VALUE ON
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE ON
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Reduce redo logging when natively creating indexes or rebuilding tables. Setting this OFF avoids delay due to page flushing and allows concurrent backup.
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_LOG_WRITE_AHEAD_SIZE
SESSION_VALUE NULL
GLOBAL_VALUE 8192
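The rows above list system-variable metadata in the style of the sys_vars suite result files (the file name was not preserved by this capture). The same metadata for the new variable can be queried directly; a sketch, assuming a server built with this patch:

SELECT VARIABLE_NAME, DEFAULT_VALUE, VARIABLE_SCOPE, VARIABLE_TYPE, READ_ONLY
FROM information_schema.SYSTEM_VARIABLES
WHERE VARIABLE_NAME = 'INNODB_LOG_OPTIMIZE_DDL';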
@@ -1691,6 +1691,11 @@ public:
virtual bool limit_index_condition_pushdown_processor(void *arg) { return 0; }
virtual bool exists2in_processor(void *arg) { return 0; }
virtual bool find_selective_predicates_list_processor(void *arg) { return 0; }
bool cleanup_is_expensive_cache_processor(void *arg)
{
is_expensive_cache= (int8)(-1);
return 0;
}

/*
TRUE if the expression depends only on the table indicated by tab_map
@@ -1642,6 +1642,13 @@ JOIN::optimize_inner()
if (optimize_constant_subqueries())
DBUG_RETURN(1);

if (conds && conds->with_subquery())
(void) conds->walk(&Item::cleanup_is_expensive_cache_processor,
0, (void *) 0);
if (having && having->with_subquery())
(void) having->walk(&Item::cleanup_is_expensive_cache_processor,
0, (void *) 0);

if (setup_jtbm_semi_joins(this, join_list, &conds))
DBUG_RETURN(1);
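The new walk in JOIN::optimize_inner() clears the cached Item::is_expensive() result (is_expensive_cache is set back to -1, the not-yet-computed value) for the WHERE and HAVING conditions when they contain subqueries, so later expensiveness checks re-evaluate them. A hedged SQL sketch of a query shape that touches both clauses (tables as in the MDEV-16820 test above; the exact plan is not taken from this commit):

explain select a, count(*) from t1
where (select max(b) from t2) = 10
group by a
having (select min(b) from t2) > 100;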
@ -29,9 +29,12 @@ Created 03/11/2014 Shaohua Wang
|
|||
#include "btr0cur.h"
|
||||
#include "btr0pcur.h"
|
||||
#include "ibuf0ibuf.h"
|
||||
#include "trx0trx.h"
|
||||
|
||||
/** Innodb B-tree index fill factor for bulk load. */
|
||||
uint innobase_fill_factor;
|
||||
/** whether to reduce redo logging during ALTER TABLE */
|
||||
my_bool innodb_log_optimize_ddl;
|
||||
|
||||
/** Initialize members, allocate page if needed and start mtr.
|
||||
Note: we commit all mtrs on failure.
|
||||
|
@ -39,7 +42,6 @@ Note: we commit all mtrs on failure.
|
|||
dberr_t
|
||||
PageBulk::init()
|
||||
{
|
||||
mtr_t* mtr;
|
||||
buf_block_t* new_block;
|
||||
page_t* new_page;
|
||||
page_zip_des_t* new_page_zip;
|
||||
|
@ -48,12 +50,14 @@ PageBulk::init()
|
|||
ut_ad(m_heap == NULL);
|
||||
m_heap = mem_heap_create(1000);
|
||||
|
||||
mtr = static_cast<mtr_t*>(
|
||||
mem_heap_alloc(m_heap, sizeof(mtr_t)));
|
||||
mtr_start(mtr);
|
||||
mtr_x_lock(dict_index_get_lock(m_index), mtr);
|
||||
mtr_set_log_mode(mtr, MTR_LOG_NO_REDO);
|
||||
mtr_set_flush_observer(mtr, m_flush_observer);
|
||||
m_mtr.start();
|
||||
mtr_x_lock(&m_index->lock, &m_mtr);
|
||||
if (m_flush_observer) {
|
||||
m_mtr.set_log_mode(MTR_LOG_NO_REDO);
|
||||
m_mtr.set_flush_observer(m_flush_observer);
|
||||
} else {
|
||||
m_index->set_modified(m_mtr);
|
||||
}
|
||||
|
||||
if (m_page_no == FIL_NULL) {
|
||||
mtr_t alloc_mtr;
|
||||
|
@ -71,14 +75,14 @@ PageBulk::init()
|
|||
m_index->table->space,
|
||||
1, FSP_NORMAL, &alloc_mtr);
|
||||
if (!success) {
|
||||
mtr_commit(&alloc_mtr);
|
||||
mtr_commit(mtr);
|
||||
alloc_mtr.commit();
|
||||
m_mtr.commit();
|
||||
return(DB_OUT_OF_FILE_SPACE);
|
||||
}
|
||||
|
||||
/* Allocate a new page. */
|
||||
new_block = btr_page_alloc(m_index, 0, FSP_UP, m_level,
|
||||
&alloc_mtr, mtr);
|
||||
&alloc_mtr, &m_mtr);
|
||||
|
||||
m_index->table->space->release_free_extents(n_reserved);
|
||||
|
||||
|
@ -90,24 +94,35 @@ PageBulk::init()
|
|||
|
||||
if (new_page_zip) {
|
||||
page_create_zip(new_block, m_index, m_level, 0,
|
||||
NULL, mtr);
|
||||
NULL, &m_mtr);
|
||||
memset(FIL_PAGE_PREV + new_page, 0xff, 8);
|
||||
page_zip_write_header(new_page_zip,
|
||||
FIL_PAGE_PREV + new_page,
|
||||
8, &m_mtr);
|
||||
mach_write_to_8(PAGE_HEADER + PAGE_INDEX_ID + new_page,
|
||||
m_index->id);
|
||||
page_zip_write_header(new_page_zip,
|
||||
PAGE_HEADER + PAGE_INDEX_ID
|
||||
+ new_page, 8, &m_mtr);
|
||||
} else {
|
||||
ut_ad(!dict_index_is_spatial(m_index));
|
||||
page_create(new_block, mtr,
|
||||
page_create(new_block, &m_mtr,
|
||||
dict_table_is_comp(m_index->table),
|
||||
false);
|
||||
btr_page_set_level(new_page, NULL, m_level, mtr);
|
||||
mlog_write_ulint(FIL_PAGE_PREV + new_page, FIL_NULL,
|
||||
MLOG_4BYTES, &m_mtr);
|
||||
mlog_write_ulint(FIL_PAGE_NEXT + new_page, FIL_NULL,
|
||||
MLOG_4BYTES, &m_mtr);
|
||||
mlog_write_ulint(PAGE_HEADER + PAGE_LEVEL + new_page,
|
||||
m_level, MLOG_2BYTES, &m_mtr);
|
||||
mlog_write_ull(PAGE_HEADER + PAGE_INDEX_ID + new_page,
|
||||
m_index->id, &m_mtr);
|
||||
}
|
||||
|
||||
btr_page_set_next(new_page, NULL, FIL_NULL, mtr);
|
||||
btr_page_set_prev(new_page, NULL, FIL_NULL, mtr);
|
||||
|
||||
btr_page_set_index_id(new_page, NULL, m_index->id, mtr);
|
||||
} else {
|
||||
new_block = btr_block_get(
|
||||
page_id_t(m_index->table->space->id, m_page_no),
|
||||
page_size_t(m_index->table->space->flags),
|
||||
RW_X_LATCH, m_index, mtr);
|
||||
RW_X_LATCH, m_index, &m_mtr);
|
||||
|
||||
new_page = buf_block_get_frame(new_block);
|
||||
new_page_zip = buf_block_get_page_zip(new_block);
|
||||
|
@ -116,16 +131,14 @@ PageBulk::init()
|
|||
|
||||
ut_ad(page_dir_get_n_heap(new_page) == PAGE_HEAP_NO_USER_LOW);
|
||||
|
||||
btr_page_set_level(new_page, NULL, m_level, mtr);
|
||||
btr_page_set_level(new_page, new_page_zip, m_level, &m_mtr);
|
||||
}
|
||||
|
||||
if (dict_index_is_sec_or_ibuf(m_index)
|
||||
&& !m_index->table->is_temporary()
|
||||
&& page_is_leaf(new_page)) {
|
||||
page_update_max_trx_id(new_block, NULL, m_trx_id, mtr);
|
||||
if (!m_level && dict_index_is_sec_or_ibuf(m_index)) {
|
||||
page_update_max_trx_id(new_block, new_page_zip, m_trx_id,
|
||||
&m_mtr);
|
||||
}
|
||||
|
||||
m_mtr = mtr;
|
||||
m_block = new_block;
|
||||
m_block->skip_flush_check = true;
|
||||
m_page = new_page;
|
||||
|
@ -149,7 +162,9 @@ PageBulk::init()
|
|||
m_rec_no = page_header_get_field(new_page, PAGE_N_RECS);
|
||||
|
||||
ut_d(m_total_data = 0);
|
||||
page_header_set_field(m_page, NULL, PAGE_HEAP_TOP, srv_page_size - 1);
|
||||
/* See page_copy_rec_list_end_to_created_page() */
|
||||
ut_d(page_header_set_field(m_page, NULL, PAGE_HEAP_TOP,
|
||||
srv_page_size - 1));
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
@ -217,6 +232,14 @@ PageBulk::insert(
|
|||
m_free_space -= rec_size + slot_size;
|
||||
m_heap_top += rec_size;
|
||||
m_rec_no += 1;
|
||||
|
||||
if (!m_flush_observer && !m_page_zip) {
|
||||
/* For ROW_FORMAT=COMPRESSED, redo log may be written
|
||||
in PageBulk::compress(). */
|
||||
page_cur_insert_rec_write_log(insert_rec, rec_size,
|
||||
m_cur_rec, m_index, &m_mtr);
|
||||
}
|
||||
|
||||
m_cur_rec = insert_rec;
|
||||
}
|
||||
|
||||
|
@ -227,15 +250,10 @@ void
|
|||
PageBulk::finish()
|
||||
{
|
||||
ut_ad(m_rec_no > 0);
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
ut_ad(m_total_data + page_dir_calc_reserved_space(m_rec_no)
|
||||
<= page_get_free_space_of_empty(m_is_comp));
|
||||
|
||||
/* To pass the debug tests we have to set these dummy values
|
||||
in the debug version */
|
||||
page_dir_set_n_slots(m_page, NULL, srv_page_size / 2);
|
||||
#endif
|
||||
/* See page_copy_rec_list_end_to_created_page() */
|
||||
ut_d(page_dir_set_n_slots(m_page, NULL, srv_page_size / 2));
|
||||
|
||||
ulint count = 0;
|
||||
ulint n_recs = 0;
|
||||
|
@ -286,15 +304,45 @@ PageBulk::finish()
|
|||
page_dir_slot_set_n_owned(slot, NULL, count + 1);
|
||||
|
||||
ut_ad(!dict_index_is_spatial(m_index));
|
||||
page_dir_set_n_slots(m_page, NULL, 2 + slot_index);
|
||||
page_header_set_ptr(m_page, NULL, PAGE_HEAP_TOP, m_heap_top);
|
||||
page_dir_set_n_heap(m_page, NULL, PAGE_HEAP_NO_USER_LOW + m_rec_no);
|
||||
page_header_set_ptr(m_page, NULL, PAGE_LAST_INSERT, m_cur_rec);
|
||||
mach_write_to_2(PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no);
|
||||
ut_ad(!page_get_instant(m_page));
|
||||
m_page[PAGE_HEADER + PAGE_DIRECTION_B] = PAGE_RIGHT;
|
||||
*reinterpret_cast<uint16_t*>(PAGE_HEADER + PAGE_N_DIRECTION + m_page)
|
||||
= 0;
|
||||
|
||||
if (!m_flush_observer && !m_page_zip) {
|
||||
mlog_write_ulint(PAGE_HEADER + PAGE_N_DIR_SLOTS + m_page,
|
||||
2 + slot_index, MLOG_2BYTES, &m_mtr);
|
||||
mlog_write_ulint(PAGE_HEADER + PAGE_HEAP_TOP + m_page,
|
||||
ulint(m_heap_top - m_page),
|
||||
MLOG_2BYTES, &m_mtr);
|
||||
mlog_write_ulint(PAGE_HEADER + PAGE_N_HEAP + m_page,
|
||||
(PAGE_HEAP_NO_USER_LOW + m_rec_no)
|
||||
| ulint(m_is_comp) << 15,
|
||||
MLOG_2BYTES, &m_mtr);
|
||||
mlog_write_ulint(PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no,
|
||||
MLOG_2BYTES, &m_mtr);
|
||||
mlog_write_ulint(PAGE_HEADER + PAGE_LAST_INSERT + m_page,
|
||||
ulint(m_cur_rec - m_page),
|
||||
MLOG_2BYTES, &m_mtr);
|
||||
mlog_write_ulint(PAGE_HEADER + PAGE_DIRECTION_B - 1 + m_page,
|
||||
PAGE_RIGHT, MLOG_2BYTES, &m_mtr);
|
||||
mlog_write_ulint(PAGE_HEADER + PAGE_N_DIRECTION + m_page, 0,
|
||||
MLOG_2BYTES, &m_mtr);
|
||||
} else {
|
||||
/* For ROW_FORMAT=COMPRESSED, redo log may be written
|
||||
in PageBulk::compress(). */
|
||||
mach_write_to_2(PAGE_HEADER + PAGE_N_DIR_SLOTS + m_page,
|
||||
2 + slot_index);
|
||||
mach_write_to_2(PAGE_HEADER + PAGE_HEAP_TOP + m_page,
|
||||
ulint(m_heap_top - m_page));
|
||||
mach_write_to_2(PAGE_HEADER + PAGE_N_HEAP + m_page,
|
||||
(PAGE_HEAP_NO_USER_LOW + m_rec_no)
|
||||
| ulint(m_is_comp) << 15);
|
||||
mach_write_to_2(PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no);
|
||||
mach_write_to_2(PAGE_HEADER + PAGE_LAST_INSERT + m_page,
|
||||
ulint(m_cur_rec - m_page));
|
||||
mach_write_to_2(PAGE_HEADER + PAGE_DIRECTION_B - 1 + m_page,
|
||||
PAGE_RIGHT);
|
||||
mach_write_to_2(PAGE_HEADER + PAGE_N_DIRECTION + m_page, 0);
|
||||
}
|
||||
|
||||
m_block->skip_flush_check = false;
|
||||
}
|
||||
|
||||
|
@ -308,15 +356,13 @@ PageBulk::commit(
|
|||
ut_ad(page_validate(m_page, m_index));
|
||||
|
||||
/* Set no free space left and no buffered changes in ibuf. */
|
||||
if (!dict_index_is_clust(m_index)
|
||||
&& !m_index->table->is_temporary()
|
||||
&& page_is_leaf(m_page)) {
|
||||
if (!dict_index_is_clust(m_index) && page_is_leaf(m_page)) {
|
||||
ibuf_set_bitmap_for_bulk_load(
|
||||
m_block, innobase_fill_factor == 100);
|
||||
}
|
||||
}
|
||||
|
||||
mtr_commit(m_mtr);
|
||||
m_mtr.commit();
|
||||
}
|
||||
|
||||
/** Compress a page of compressed table
|
||||
|
@ -328,7 +374,7 @@ PageBulk::compress()
|
|||
ut_ad(m_page_zip != NULL);
|
||||
|
||||
return(page_zip_compress(m_page_zip, m_page, m_index,
|
||||
page_zip_level, NULL, m_mtr));
|
||||
page_zip_level, NULL, &m_mtr));
|
||||
}
|
||||
|
||||
/** Get node pointer
|
||||
|
@ -475,20 +521,30 @@ PageBulk::copyOut(
|
|||
|
||||
/** Set next page
|
||||
@param[in] next_page_no next page no */
|
||||
void
|
||||
PageBulk::setNext(
|
||||
ulint next_page_no)
|
||||
inline void PageBulk::setNext(ulint next_page_no)
|
||||
{
|
||||
btr_page_set_next(m_page, NULL, next_page_no, m_mtr);
|
||||
if (UNIV_LIKELY_NULL(m_page_zip)) {
|
||||
/* For ROW_FORMAT=COMPRESSED, redo log may be written
|
||||
in PageBulk::compress(). */
|
||||
mach_write_to_4(m_page + FIL_PAGE_NEXT, next_page_no);
|
||||
} else {
|
||||
mlog_write_ulint(m_page + FIL_PAGE_NEXT, next_page_no,
|
||||
MLOG_4BYTES, &m_mtr);
|
||||
}
|
||||
}
|
||||
|
||||
/** Set previous page
|
||||
@param[in] prev_page_no previous page no */
|
||||
void
|
||||
PageBulk::setPrev(
|
||||
ulint prev_page_no)
|
||||
inline void PageBulk::setPrev(ulint prev_page_no)
|
||||
{
|
||||
btr_page_set_prev(m_page, NULL, prev_page_no, m_mtr);
|
||||
if (UNIV_LIKELY_NULL(m_page_zip)) {
|
||||
/* For ROW_FORMAT=COMPRESSED, redo log may be written
|
||||
in PageBulk::compress(). */
|
||||
mach_write_to_4(m_page + FIL_PAGE_PREV, prev_page_no);
|
||||
} else {
|
||||
mlog_write_ulint(m_page + FIL_PAGE_PREV, prev_page_no,
|
||||
MLOG_4BYTES, &m_mtr);
|
||||
}
|
||||
}
|
||||
|
||||
/** Check if required space is available in the page for the rec to be inserted.
|
||||
|
@ -561,7 +617,7 @@ PageBulk::storeExt(
|
|||
page_cur->block = m_block;
|
||||
|
||||
dberr_t err = btr_store_big_rec_extern_fields(
|
||||
&btr_pcur, offsets, big_rec, m_mtr, BTR_STORE_INSERT_BULK);
|
||||
&btr_pcur, offsets, big_rec, &m_mtr, BTR_STORE_INSERT_BULK);
|
||||
|
||||
ut_ad(page_offset(m_cur_rec) == page_offset(page_cur->rec));
|
||||
|
||||
|
@ -587,30 +643,30 @@ PageBulk::release()
|
|||
/* No other threads can modify this block. */
|
||||
m_modify_clock = buf_block_get_modify_clock(m_block);
|
||||
|
||||
mtr_commit(m_mtr);
|
||||
m_mtr.commit();
|
||||
}
|
||||
|
||||
/** Start mtr and latch the block */
|
||||
dberr_t
|
||||
PageBulk::latch()
|
||||
{
|
||||
ibool ret;
|
||||
m_mtr.start();
|
||||
mtr_x_lock(&m_index->lock, &m_mtr);
|
||||
if (m_flush_observer) {
|
||||
m_mtr.set_log_mode(MTR_LOG_NO_REDO);
|
||||
m_mtr.set_flush_observer(m_flush_observer);
|
||||
} else {
|
||||
m_index->set_modified(m_mtr);
|
||||
}
|
||||
|
||||
mtr_start(m_mtr);
|
||||
mtr_x_lock(dict_index_get_lock(m_index), m_mtr);
|
||||
mtr_set_log_mode(m_mtr, MTR_LOG_NO_REDO);
|
||||
mtr_set_flush_observer(m_mtr, m_flush_observer);
|
||||
|
||||
/* TODO: need a simple and wait version of buf_page_optimistic_get. */
|
||||
ret = buf_page_optimistic_get(RW_X_LATCH, m_block, m_modify_clock,
|
||||
__FILE__, __LINE__, m_mtr);
|
||||
/* In case the block is S-latched by page_cleaner. */
|
||||
if (!ret) {
|
||||
if (!buf_page_optimistic_get(RW_X_LATCH, m_block, m_modify_clock,
|
||||
__FILE__, __LINE__, &m_mtr)) {
|
||||
m_block = buf_page_get_gen(
|
||||
page_id_t(m_index->table->space->id, m_page_no),
|
||||
page_size_t(m_index->table->space->flags),
|
||||
RW_X_LATCH, m_block, BUF_GET_IF_IN_POOL,
|
||||
__FILE__, __LINE__, m_mtr, &m_err);
|
||||
__FILE__, __LINE__, &m_mtr, &m_err);
|
||||
|
||||
if (m_err != DB_SUCCESS) {
|
||||
return (m_err);
|
||||
|
@ -643,7 +699,7 @@ BtrBulk::pageSplit(
|
|||
}
|
||||
|
||||
/* 2. create a new page. */
|
||||
PageBulk new_page_bulk(m_index, m_trx_id, FIL_NULL,
|
||||
PageBulk new_page_bulk(m_index, m_trx->id, FIL_NULL,
|
||||
page_bulk->getLevel(), m_flush_observer);
|
||||
dberr_t err = new_page_bulk.init();
|
||||
if (err != DB_SUCCESS) {
|
||||
|
@ -722,8 +778,7 @@ BtrBulk::pageCommit(
|
|||
}
|
||||
|
||||
/** Log free check */
|
||||
void
|
||||
BtrBulk::logFreeCheck()
|
||||
inline void BtrBulk::logFreeCheck()
|
||||
{
|
||||
if (log_sys.check_flush_or_checkpoint) {
|
||||
release();
|
||||
|
@ -738,10 +793,10 @@ BtrBulk::logFreeCheck()
|
|||
void
|
||||
BtrBulk::release()
|
||||
{
|
||||
ut_ad(m_root_level + 1 == m_page_bulks->size());
|
||||
ut_ad(m_root_level + 1 == m_page_bulks.size());
|
||||
|
||||
for (ulint level = 0; level <= m_root_level; level++) {
|
||||
PageBulk* page_bulk = m_page_bulks->at(level);
|
||||
PageBulk* page_bulk = m_page_bulks.at(level);
|
||||
|
||||
page_bulk->release();
|
||||
}
|
||||
|
@ -751,10 +806,10 @@ BtrBulk::release()
|
|||
void
|
||||
BtrBulk::latch()
|
||||
{
|
||||
ut_ad(m_root_level + 1 == m_page_bulks->size());
|
||||
ut_ad(m_root_level + 1 == m_page_bulks.size());
|
||||
|
||||
for (ulint level = 0; level <= m_root_level; level++) {
|
||||
PageBulk* page_bulk = m_page_bulks->at(level);
|
||||
PageBulk* page_bulk = m_page_bulks.at(level);
|
||||
page_bulk->latch();
|
||||
}
|
||||
}
|
||||
|
@ -771,28 +826,26 @@ BtrBulk::insert(
|
|||
bool is_left_most = false;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
ut_ad(m_heap != NULL);
|
||||
|
||||
/* Check if we need to create a PageBulk for the level. */
|
||||
if (level + 1 > m_page_bulks->size()) {
|
||||
if (level + 1 > m_page_bulks.size()) {
|
||||
PageBulk* new_page_bulk
|
||||
= UT_NEW_NOKEY(PageBulk(m_index, m_trx_id, FIL_NULL,
|
||||
= UT_NEW_NOKEY(PageBulk(m_index, m_trx->id, FIL_NULL,
|
||||
level, m_flush_observer));
|
||||
err = new_page_bulk->init();
|
||||
if (err != DB_SUCCESS) {
|
||||
return(err);
|
||||
}
|
||||
|
||||
m_page_bulks->push_back(new_page_bulk);
|
||||
ut_ad(level + 1 == m_page_bulks->size());
|
||||
m_page_bulks.push_back(new_page_bulk);
|
||||
ut_ad(level + 1 == m_page_bulks.size());
|
||||
m_root_level = level;
|
||||
|
||||
is_left_most = true;
|
||||
}
|
||||
|
||||
ut_ad(m_page_bulks->size() > level);
|
||||
ut_ad(m_page_bulks.size() > level);
|
||||
|
||||
PageBulk* page_bulk = m_page_bulks->at(level);
|
||||
PageBulk* page_bulk = m_page_bulks.at(level);
|
||||
|
||||
if (is_left_most && level > 0 && page_bulk->getRecNo() == 0) {
|
||||
/* The node pointer must be marked as the predefined minimum
|
||||
|
@ -829,7 +882,7 @@ BtrBulk::insert(
|
|||
if (!page_bulk->isSpaceAvailable(rec_size)) {
|
||||
/* Create a sibling page_bulk. */
|
||||
PageBulk* sibling_page_bulk;
|
||||
sibling_page_bulk = UT_NEW_NOKEY(PageBulk(m_index, m_trx_id,
|
||||
sibling_page_bulk = UT_NEW_NOKEY(PageBulk(m_index, m_trx->id,
|
||||
FIL_NULL, level,
|
||||
m_flush_observer));
|
||||
err = sibling_page_bulk->init();
|
||||
|
@ -848,15 +901,18 @@ BtrBulk::insert(
|
|||
|
||||
/* Set new page bulk to page_bulks. */
|
||||
ut_ad(sibling_page_bulk->getLevel() <= m_root_level);
|
||||
m_page_bulks->at(level) = sibling_page_bulk;
|
||||
m_page_bulks.at(level) = sibling_page_bulk;
|
||||
|
||||
UT_DELETE(page_bulk);
|
||||
page_bulk = sibling_page_bulk;
|
||||
|
||||
/* Important: log_free_check whether we need a checkpoint. */
|
||||
if (page_is_leaf(sibling_page_bulk->getPage())) {
|
||||
/* Check whether trx is interrupted */
|
||||
if (m_flush_observer->check_interrupted()) {
|
||||
if (trx_is_interrupted(m_trx)) {
|
||||
if (m_flush_observer) {
|
||||
m_flush_observer->interrupted();
|
||||
}
|
||||
|
||||
err = DB_INTERRUPTED;
|
||||
goto func_exit;
|
||||
}
|
||||
|
@ -881,11 +937,11 @@ BtrBulk::insert(
|
|||
if (big_rec != NULL) {
|
||||
ut_ad(dict_index_is_clust(m_index));
|
||||
ut_ad(page_bulk->getLevel() == 0);
|
||||
ut_ad(page_bulk == m_page_bulks->at(0));
|
||||
ut_ad(page_bulk == m_page_bulks.at(0));
|
||||
|
||||
/* Release all latched but leaf node. */
|
||||
for (ulint level = 1; level <= m_root_level; level++) {
|
||||
PageBulk* page_bulk = m_page_bulks->at(level);
|
||||
PageBulk* page_bulk = m_page_bulks.at(level);
|
||||
|
||||
page_bulk->release();
|
||||
}
|
||||
|
@ -894,7 +950,7 @@ BtrBulk::insert(
|
|||
|
||||
/* Latch */
|
||||
for (ulint level = 1; level <= m_root_level; level++) {
|
||||
PageBulk* page_bulk = m_page_bulks->at(level);
|
||||
PageBulk* page_bulk = m_page_bulks.at(level);
|
||||
page_bulk->latch();
|
||||
}
|
||||
}
|
||||
|
@ -919,17 +975,17 @@ BtrBulk::finish(dberr_t err)
|
|||
|
||||
ut_ad(!m_index->table->is_temporary());
|
||||
|
||||
if (m_page_bulks->size() == 0) {
|
||||
if (m_page_bulks.size() == 0) {
|
||||
/* The table is empty. The root page of the index tree
|
||||
is already in a consistent state. No need to flush. */
|
||||
return(err);
|
||||
}
|
||||
|
||||
ut_ad(m_root_level + 1 == m_page_bulks->size());
|
||||
ut_ad(m_root_level + 1 == m_page_bulks.size());
|
||||
|
||||
/* Finish all page bulks */
|
||||
for (ulint level = 0; level <= m_root_level; level++) {
|
||||
PageBulk* page_bulk = m_page_bulks->at(level);
|
||||
PageBulk* page_bulk = m_page_bulks.at(level);
|
||||
|
||||
last_page_no = page_bulk->getPageNo();
|
||||
|
||||
|
@ -949,7 +1005,7 @@ BtrBulk::finish(dberr_t err)
|
|||
rec_t* first_rec;
|
||||
mtr_t mtr;
|
||||
buf_block_t* last_block;
|
||||
PageBulk root_page_bulk(m_index, m_trx_id,
|
||||
PageBulk root_page_bulk(m_index, m_trx->id,
|
||||
m_index->page, m_root_level,
|
||||
m_flush_observer);
|
||||
|
||||
|
|
|
@ -3769,18 +3769,12 @@ FlushObserver::~FlushObserver()
|
|||
DBUG_LOG("flush", "~FlushObserver(): trx->id=" << m_trx->id);
|
||||
}
|
||||
|
||||
/** Check whether trx is interrupted
|
||||
@return true if trx is interrupted */
|
||||
bool
|
||||
FlushObserver::check_interrupted()
|
||||
/** Check whether the operation has been interrupted */
|
||||
void FlushObserver::check_interrupted()
|
||||
{
|
||||
if (trx_is_interrupted(m_trx)) {
|
||||
interrupted();
|
||||
|
||||
return(true);
|
||||
}
|
||||
|
||||
return(false);
|
||||
}
|
||||
|
||||
/** Notify observer of a flush
|
||||
|
|
|
@ -19417,6 +19417,13 @@ static MYSQL_SYSVAR_BOOL(log_compressed_pages, page_zip_log_pages,
|
|||
" compression algorithm doesn't change.",
|
||||
NULL, NULL, TRUE);
|
||||
|
||||
static MYSQL_SYSVAR_BOOL(log_optimize_ddl, innodb_log_optimize_ddl,
|
||||
PLUGIN_VAR_OPCMDARG,
|
||||
"Reduce redo logging when natively creating indexes or rebuilding tables."
|
||||
" Setting this OFF avoids delay due to page flushing and"
|
||||
" allows concurrent backup.",
|
||||
NULL, NULL, TRUE);
|
||||
|
||||
static MYSQL_SYSVAR_ULONG(autoextend_increment,
|
||||
sys_tablespace_auto_extend_increment,
|
||||
PLUGIN_VAR_RQCMDARG,
|
||||
|
@ -20306,6 +20313,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
|
|||
MYSQL_SYSVAR(log_write_ahead_size),
|
||||
MYSQL_SYSVAR(log_group_home_dir),
|
||||
MYSQL_SYSVAR(log_compressed_pages),
|
||||
MYSQL_SYSVAR(log_optimize_ddl),
|
||||
MYSQL_SYSVAR(max_dirty_pages_pct),
|
||||
MYSQL_SYSVAR(max_dirty_pages_pct_lwm),
|
||||
MYSQL_SYSVAR(adaptive_flushing_lwm),
|
||||
|
|
|
@ -34,6 +34,8 @@ Created 03/11/2014 Shaohua Wang
|
|||
|
||||
/** Innodb B-tree index fill factor for bulk load. */
|
||||
extern uint innobase_fill_factor;
|
||||
/** whether to reduce redo logging during ALTER TABLE */
|
||||
extern my_bool innodb_log_optimize_ddl;
|
||||
|
||||
/*
|
||||
The proper function call sequence of PageBulk is as below:
|
||||
|
@ -63,7 +65,7 @@ public:
|
|||
:
|
||||
m_heap(NULL),
|
||||
m_index(index),
|
||||
m_mtr(NULL),
|
||||
m_mtr(),
|
||||
m_trx_id(trx_id),
|
||||
m_block(NULL),
|
||||
m_page(NULL),
|
||||
|
@ -84,6 +86,7 @@ public:
|
|||
m_err(DB_SUCCESS)
|
||||
{
|
||||
ut_ad(!dict_index_is_spatial(m_index));
|
||||
ut_ad(!m_index->table->is_temporary());
|
||||
}
|
||||
|
||||
/** Deconstructor */
|
||||
|
@ -145,11 +148,11 @@ public:
|
|||
|
||||
/** Set next page
|
||||
@param[in] next_page_no next page no */
|
||||
void setNext(ulint next_page_no);
|
||||
inline void setNext(ulint next_page_no);
|
||||
|
||||
/** Set previous page
|
||||
@param[in] prev_page_no previous page no */
|
||||
void setPrev(ulint prev_page_no);
|
||||
inline void setPrev(ulint prev_page_no);
|
||||
|
||||
/** Release block by commiting mtr */
|
||||
inline void release();
|
||||
|
@ -205,8 +208,8 @@ private:
|
|||
/** The index B-tree */
|
||||
dict_index_t* m_index;
|
||||
|
||||
/** The min-transaction */
|
||||
mtr_t* m_mtr;
|
||||
/** The mini-transaction */
|
||||
mtr_t m_mtr;
|
||||
|
||||
/** The transaction id */
|
||||
trx_id_t m_trx_id;
|
||||
|
@ -256,7 +259,7 @@ private:
|
|||
when the block is re-pinned */
|
||||
ib_uint64_t m_modify_clock;
|
||||
|
||||
/** Flush observer */
|
||||
/** Flush observer, or NULL if redo logging is enabled */
|
||||
FlushObserver* m_flush_observer;
|
||||
|
||||
/** Operation result DB_SUCCESS or error code */
|
||||
|
@ -271,41 +274,32 @@ class BtrBulk
|
|||
public:
|
||||
/** Constructor
|
||||
@param[in] index B-tree index
|
||||
@param[in] trx_id transaction id
|
||||
@param[in] trx transaction
|
||||
@param[in] observer flush observer */
|
||||
BtrBulk(
|
||||
dict_index_t* index,
|
||||
trx_id_t trx_id,
|
||||
const trx_t* trx,
|
||||
FlushObserver* observer)
|
||||
:
|
||||
m_heap(NULL),
|
||||
m_index(index),
|
||||
m_trx_id(trx_id),
|
||||
m_trx(trx),
|
||||
m_flush_observer(observer)
|
||||
{
|
||||
ut_ad(m_flush_observer != NULL);
|
||||
ut_d(my_atomic_addlint(
|
||||
&m_index->table->space->redo_skipped_count, 1));
|
||||
#ifdef UNIV_DEBUG
|
||||
if (m_flush_observer)
|
||||
my_atomic_addlint(&m_index->table->space->redo_skipped_count,
|
||||
1);
|
||||
#endif /* UNIV_DEBUG */
|
||||
}
|
||||
|
||||
/** Destructor */
|
||||
~BtrBulk()
|
||||
{
|
||||
mem_heap_free(m_heap);
|
||||
UT_DELETE(m_page_bulks);
|
||||
ut_d(my_atomic_addlint(
|
||||
&m_index->table->space->redo_skipped_count,
|
||||
ulint(-1)));
|
||||
}
|
||||
|
||||
/** Initialization
|
||||
Note: must be called right after constructor. */
|
||||
void init()
|
||||
{
|
||||
ut_ad(m_heap == NULL);
|
||||
m_heap = mem_heap_create(1000);
|
||||
|
||||
m_page_bulks = UT_NEW_NOKEY(page_bulk_vector());
|
||||
#ifdef UNIV_DEBUG
|
||||
if (m_flush_observer)
|
||||
my_atomic_addlint(&m_index->table->space->redo_skipped_count,
|
||||
ulint(-1));
|
||||
#endif /* UNIV_DEBUG */
|
||||
}
|
||||
|
||||
/** Insert a tuple
|
||||
|
@ -365,26 +359,23 @@ private:
|
|||
}
|
||||
|
||||
/** Log free check */
|
||||
void logFreeCheck();
|
||||
inline void logFreeCheck();
|
||||
|
||||
private:
|
||||
/** Memory heap for allocation */
|
||||
mem_heap_t* m_heap;
|
||||
|
||||
/** B-tree index */
|
||||
dict_index_t* m_index;
|
||||
dict_index_t*const m_index;
|
||||
|
||||
/** Transaction id */
|
||||
trx_id_t m_trx_id;
|
||||
/** Transaction */
|
||||
const trx_t*const m_trx;
|
||||
|
||||
/** Root page level */
|
||||
ulint m_root_level;
|
||||
|
||||
/** Flush observer */
|
||||
FlushObserver* m_flush_observer;
|
||||
/** Flush observer, or NULL if redo logging is enabled */
|
||||
FlushObserver*const m_flush_observer;
|
||||
|
||||
/** Page cursor vector for all level */
|
||||
page_bulk_vector* m_page_bulks;
|
||||
page_bulk_vector m_page_bulks;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
@ -371,9 +371,8 @@ public:
|
|||
m_interrupted = true;
|
||||
}
|
||||
|
||||
/** Check whether trx is interrupted
|
||||
@return true if trx is interrupted */
|
||||
bool check_interrupted();
|
||||
/** Check whether the operation has been interrupted */
|
||||
void check_interrupted();
|
||||
|
||||
/** Flush dirty pages. */
|
||||
void flush();
|
||||
|
@ -395,7 +394,7 @@ private:
|
|||
fil_space_t* m_space;
|
||||
|
||||
/** Trx instance */
|
||||
trx_t* const m_trx;
|
||||
const trx_t* const m_trx;
|
||||
|
||||
/** Performance schema accounting object, used by ALTER TABLE.
|
||||
If not NULL, then stage->begin_phase_flush() will be called initially,
|
||||
|
|
|
@ -61,13 +61,6 @@ savepoint. */
|
|||
@return old mode */
|
||||
#define mtr_set_log_mode(m, d) (m)->set_log_mode((d))
|
||||
|
||||
/** Get the flush observer of a mini-transaction.
|
||||
@return flush observer object */
|
||||
#define mtr_get_flush_observer(m) (m)->get_flush_observer()
|
||||
|
||||
/** Set the flush observer of a mini-transaction. */
|
||||
#define mtr_set_flush_observer(m, d) (m)->set_flush_observer((d))
|
||||
|
||||
/** Read 1 - 4 bytes from a file page buffered in the buffer pool.
|
||||
@return value read */
|
||||
#define mtr_read_ulint(p, t, m) (m)->read_ulint((p), (t))
|
||||
|
|
|
@ -328,6 +328,20 @@ page_cur_open_on_rnd_user_rec(
|
|||
/*==========================*/
|
||||
buf_block_t* block, /*!< in: page */
|
||||
page_cur_t* cursor);/*!< out: page cursor */
|
||||
/** Write a redo log record of inserting a record into an index page.
|
||||
@param[in] insert_rec inserted record
|
||||
@param[in] rec_size rec_get_size(insert_rec)
|
||||
@param[in] cursor_rec predecessor of insert_rec
|
||||
@param[in,out] index index tree
|
||||
@param[in,out] mtr mini-transaction */
|
||||
void
|
||||
page_cur_insert_rec_write_log(
|
||||
const rec_t* insert_rec,
|
||||
ulint rec_size,
|
||||
const rec_t* cursor_rec,
|
||||
dict_index_t* index,
|
||||
mtr_t* mtr)
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
/***********************************************************//**
|
||||
Parses a log record of a record insert on a page.
|
||||
@return end of log record or NULL */
|
||||
|
|
|
@ -824,18 +824,19 @@ page_cur_open_on_rnd_user_rec(
|
|||
} while (rnd--);
|
||||
}
|
||||
|
||||
/***********************************************************//**
|
||||
Writes the log record of a record insert on a page. */
|
||||
static
|
||||
/** Write a redo log record of inserting a record into an index page.
|
||||
@param[in] insert_rec inserted record
|
||||
@param[in] rec_size rec_get_size(insert_rec)
|
||||
@param[in] cursor_rec predecessor of insert_rec
|
||||
@param[in,out] index index tree
|
||||
@param[in,out] mtr mini-transaction */
|
||||
void
|
||||
page_cur_insert_rec_write_log(
|
||||
/*==========================*/
|
||||
rec_t* insert_rec, /*!< in: inserted physical record */
|
||||
ulint rec_size, /*!< in: insert_rec size */
|
||||
rec_t* cursor_rec, /*!< in: record the
|
||||
cursor is pointing to */
|
||||
dict_index_t* index, /*!< in: record descriptor */
|
||||
mtr_t* mtr) /*!< in: mini-transaction handle */
|
||||
const rec_t* insert_rec,
|
||||
ulint rec_size,
|
||||
const rec_t* cursor_rec,
|
||||
dict_index_t* index,
|
||||
mtr_t* mtr)
|
||||
{
|
||||
ulint cur_rec_size;
|
||||
ulint extra_size;
|
||||
|
|
|
@ -1687,12 +1687,10 @@ row_fts_merge_insert(
|
|||
ut_ad(aux_index->n_core_null_bytes
|
||||
== UT_BITS_IN_BYTES(aux_index->n_nullable));
|
||||
|
||||
FlushObserver* observer;
|
||||
observer = psort_info[0].psort_common->trx->flush_observer;
|
||||
|
||||
/* Create bulk load instance */
|
||||
ins_ctx.btr_bulk = UT_NEW_NOKEY(BtrBulk(aux_index, trx->id, observer));
|
||||
ins_ctx.btr_bulk->init();
|
||||
ins_ctx.btr_bulk = UT_NEW_NOKEY(
|
||||
BtrBulk(aux_index, trx, psort_info[0].psort_common->trx
|
||||
->flush_observer));
|
||||
|
||||
/* Create tuple for insert */
|
||||
ins_ctx.tuple = dtuple_create(heap, dict_index_get_n_fields(aux_index));
|
||||
|
|
|
@ -2528,17 +2528,16 @@ write_buffers:
|
|||
if (clust_btr_bulk == NULL) {
|
||||
clust_btr_bulk = UT_NEW_NOKEY(
|
||||
BtrBulk(index[i],
|
||||
trx->id,
|
||||
observer));
|
||||
|
||||
clust_btr_bulk->init();
|
||||
trx,
|
||||
observer/**/));
|
||||
} else {
|
||||
clust_btr_bulk->latch();
|
||||
}
|
||||
|
||||
err = row_merge_insert_index_tuples(
|
||||
index[i], old_table,
|
||||
OS_FILE_CLOSED, NULL, buf, clust_btr_bulk,
|
||||
OS_FILE_CLOSED, NULL, buf,
|
||||
clust_btr_bulk,
|
||||
table_total_rows,
|
||||
curr_progress,
|
||||
pct_cost,
|
||||
|
@ -2643,13 +2642,13 @@ write_buffers:
|
|||
trx->error_key_num = i;
|
||||
goto all_done;);
|
||||
|
||||
BtrBulk btr_bulk(index[i], trx->id,
|
||||
BtrBulk btr_bulk(index[i], trx,
|
||||
observer);
|
||||
btr_bulk.init();
|
||||
|
||||
err = row_merge_insert_index_tuples(
|
||||
index[i], old_table,
|
||||
OS_FILE_CLOSED, NULL, buf, &btr_bulk,
|
||||
OS_FILE_CLOSED, NULL, buf,
|
||||
&btr_bulk,
|
||||
table_total_rows,
|
||||
curr_progress,
|
||||
pct_cost,
|
||||
|
@ -4673,11 +4672,15 @@ row_merge_build_indexes(
|
|||
we use bulk load to create all types of indexes except spatial index,
|
||||
for which redo logging is enabled. If we create only spatial indexes,
|
||||
we don't need to flush dirty pages at all. */
|
||||
bool need_flush_observer = (old_table != new_table);
|
||||
bool need_flush_observer = bool(innodb_log_optimize_ddl);
|
||||
|
||||
for (i = 0; i < n_indexes; i++) {
|
||||
if (!dict_index_is_spatial(indexes[i])) {
|
||||
need_flush_observer = true;
|
||||
if (need_flush_observer) {
|
||||
need_flush_observer = old_table != new_table;
|
||||
|
||||
for (i = 0; i < n_indexes; i++) {
|
||||
if (!dict_index_is_spatial(indexes[i])) {
|
||||
need_flush_observer = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4921,9 +4924,8 @@ wait_again:
|
|||
os_thread_sleep(20000000);); /* 20 sec */
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
BtrBulk btr_bulk(sort_idx, trx->id,
|
||||
BtrBulk btr_bulk(sort_idx, trx,
|
||||
flush_observer);
|
||||
btr_bulk.init();
|
||||
|
||||
pct_cost = (COST_BUILD_INDEX_STATIC +
|
||||
(total_dynamic_cost * merge_files[i].offset /
|
||||
|
@ -4976,14 +4978,16 @@ wait_again:
|
|||
ut_ad(sort_idx->online_status
|
||||
== ONLINE_INDEX_COMPLETE);
|
||||
} else {
|
||||
ut_ad(need_flush_observer);
|
||||
if (flush_observer) {
|
||||
flush_observer->flush();
|
||||
row_merge_write_redo(indexes[i]);
|
||||
}
|
||||
|
||||
if (global_system_variables.log_warnings > 2) {
|
||||
sql_print_information(
|
||||
"InnoDB: Online DDL : Applying"
|
||||
" log to index");
|
||||
}
|
||||
flush_observer->flush();
|
||||
row_merge_write_redo(indexes[i]);
|
||||
|
||||
DEBUG_SYNC_C("row_log_apply_before");
|
||||
error = row_log_apply(trx, sort_idx, table, stage);
|
||||
|
|
|
@ -4973,6 +4973,13 @@ wrong_offs:
|
|||
if (!rec_get_deleted_flag(rec, comp)) {
|
||||
goto no_gap_lock;
|
||||
}
|
||||
|
||||
/* At most one transaction can be active
|
||||
for temporary table. */
|
||||
if (clust_index->table->is_temporary()) {
|
||||
goto no_gap_lock;
|
||||
}
|
||||
|
||||
if (index == clust_index) {
|
||||
trx_id_t trx_id = row_get_rec_trx_id(
|
||||
rec, index, offsets);
|
||||
|
|
|
@ -126,6 +126,8 @@ row_vers_impl_x_locked_low(
|
|||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
ut_ad(!clust_index->table->is_temporary());
|
||||
|
||||
trx_t* trx;
|
||||
|
||||
if (trx_id == caller_trx->id) {
|
||||
|
|