Merge bb-10.2-ext into 10.3

Marko Mäkelä 2017-09-14 09:12:47 +03:00
commit 348eaf4252
56 changed files with 836 additions and 494 deletions

View file

@ -117,6 +117,7 @@ xb_mysql_connect()
mysql_options(connection, MYSQL_PLUGIN_DIR, xb_plugin_dir);
}
mysql_options(connection, MYSQL_OPT_PROTOCOL, &opt_protocol);
mysql_options(connection, MYSQL_SET_CHARSET_NAME, "utf8");
msg_ts("Connecting to MySQL server host: %s, user: %s, password: %s, "
"port: %s, socket: %s\n", opt_host ? opt_host : "localhost",
@ -1629,3 +1630,86 @@ backup_cleanup()
mysql_close(mysql_connection);
}
}
static pthread_mutex_t mdl_lock_con_mutex;
static MYSQL *mdl_con = NULL;
void
mdl_lock_init()
{
pthread_mutex_init(&mdl_lock_con_mutex, NULL);
mdl_con = xb_mysql_connect();
if (mdl_con)
{
xb_mysql_query(mdl_con, "BEGIN", false, true);
}
}
#ifndef DBUG_OFF
/* Test that the table is really locked when lock_ddl_per_table is set.
The test is executed in a DBUG_EXECUTE_IF block inside mdl_lock_table().
*/
static void check_mdl_lock_works(const char *table_name)
{
MYSQL *test_con= xb_mysql_connect();
char *query;
xb_a(asprintf(&query,
"SET STATEMENT max_statement_time=1 FOR ALTER TABLE %s"
" ADD COLUMN mdl_lock_column int", table_name));
int err = mysql_query(test_con, query);
DBUG_ASSERT(err);
int err_no = mysql_errno(test_con);
DBUG_ASSERT(err_no == ER_STATEMENT_TIMEOUT);
mysql_close(test_con);
free(query);
}
#endif
extern void
dict_fs2utf8(const char*, char*, size_t, char*, size_t);
void
mdl_lock_table(ulint space_id)
{
static const char q[] = "SELECT NAME "
"FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES "
"WHERE SPACE = " ULINTPF " AND NAME LIKE '%%/%%'";
char query[22 + sizeof q];
snprintf(query, sizeof query, q, space_id);
pthread_mutex_lock(&mdl_lock_con_mutex);
MYSQL_RES *mysql_result = xb_mysql_query(mdl_con, query, true, true);
while (MYSQL_ROW row = mysql_fetch_row(mysql_result)) {
char full_table_name[2*FN_REFLEN +2];
char db_utf8[FN_REFLEN];
char table_utf8[FN_REFLEN];
static const char lq[] = "SELECT * FROM %s LIMIT 0";
char lock_query[sizeof full_table_name + sizeof lq];
dict_fs2utf8(row[0], db_utf8, sizeof db_utf8, table_utf8, sizeof table_utf8);
snprintf(full_table_name, sizeof(full_table_name), "`%s`.`%s`", db_utf8, table_utf8);
msg_ts("Locking MDL for %s\n", full_table_name);
snprintf(lock_query, sizeof lock_query, lq, full_table_name);
xb_mysql_query(mdl_con, lock_query, false, false);
DBUG_EXECUTE_IF("check_mdl_lock_works",
check_mdl_lock_works(full_table_name););
}
pthread_mutex_unlock(&mdl_lock_con_mutex);
mysql_free_result(mysql_result);
}
void
mdl_unlock_all()
{
msg_ts("Unlocking MDL for all tables\n");
xb_mysql_query(mdl_con, "COMMIT", false, true);
mysql_close(mdl_con);
pthread_mutex_destroy(&mdl_lock_con_mutex);
}
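Note: mdl_lock_table() above pins each table with an ordinary metadata lock. A no-op SELECT issued inside the long-lived transaction that mdl_lock_init() opens acquires an MDL that is only released when mdl_unlock_all() commits. A minimal SQL sketch of the same trick (the table name is illustrative, not from the patch):
BEGIN;
SELECT * FROM `db1`.`t1` LIMIT 0;  # acquires MDL on t1, returns no rows
# concurrent ALTER/DROP/RENAME TABLE on t1 now block ...
COMMIT;                            # ... until all MDL locks are released here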

View file

@ -297,6 +297,8 @@ my_bool opt_noversioncheck = FALSE;
my_bool opt_no_backup_locks = FALSE;
my_bool opt_decompress = FALSE;
my_bool opt_lock_ddl_per_table = FALSE;
static const char *binlog_info_values[] = {"off", "lockless", "on", "auto",
NullS};
static TYPELIB binlog_info_typelib = {array_elements(binlog_info_values)-1, "",
@ -538,7 +540,8 @@ enum options_xtrabackup
OPT_XTRA_TABLES_EXCLUDE,
OPT_XTRA_DATABASES_EXCLUDE,
OPT_PROTOCOL
OPT_PROTOCOL,
OPT_LOCK_DDL_PER_TABLE
};
struct my_option xb_client_options[] =
@ -1073,6 +1076,11 @@ struct my_option xb_server_options[] =
(G_PTR*) &xb_open_files_limit, (G_PTR*) &xb_open_files_limit, 0, GET_ULONG,
REQUIRED_ARG, 0, 0, UINT_MAX, 0, 1, 0},
{"lock-ddl-per-table", OPT_LOCK_DDL_PER_TABLE, "Lock DDL for each table "
"before xtrabackup starts to copy it and until the backup is completed.",
(uchar*) &opt_lock_ddl_per_table, (uchar*) &opt_lock_ddl_per_table, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@ -2203,6 +2211,10 @@ xtrabackup_copy_datafile(fil_node_t* node, uint thread_n)
return(FALSE);
}
if (opt_lock_ddl_per_table) {
mdl_lock_table(node->space->id);
}
if (!changed_page_bitmap) {
read_filter = &rf_pass_through;
}
@ -2345,10 +2357,18 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn)
scanned_checkpoint = checkpoint;
ulint data_len = log_block_get_data_len(log_block);
scanned_lsn += data_len;
if (data_len != OS_FILE_LOG_BLOCK_SIZE) {
/* The current end of the log was reached. */
if (data_len == OS_FILE_LOG_BLOCK_SIZE) {
/* We got a full log block. */
scanned_lsn += data_len;
} else if (data_len
>= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE
|| data_len <= LOG_BLOCK_HDR_SIZE) {
/* We got a garbage block (abrupt end of the log). */
break;
} else {
/* We got a partial block (abrupt end of the log). */
scanned_lsn += data_len;
break;
}
}
@ -2361,7 +2381,7 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn)
if (ulint write_size = ulint(end_lsn - start_lsn)) {
if (srv_encrypt_log) {
log_crypt(log_sys->buf, write_size);
log_crypt(log_sys->buf, start_lsn, write_size);
}
if (ds_write(dst_log_file, log_sys->buf, write_size)) {
@ -3550,6 +3570,10 @@ xtrabackup_backup_func()
"or RENAME TABLE during the backup, inconsistent backup will be "
"produced.\n");
if (opt_lock_ddl_per_table) {
mdl_lock_init();
}
/* initialize components */
if(innodb_init_param()) {
fail:
@ -3739,10 +3763,10 @@ old_format:
const byte* buf = log_sys->checkpoint_buf;
checkpoint_lsn_start = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
checkpoint_no_start = mach_read_from_8(buf + LOG_CHECKPOINT_NO);
reread_log_header:
checkpoint_lsn_start = log_sys->log.lsn;
checkpoint_no_start = log_sys->next_checkpoint_no;
err = recv_find_max_checkpoint(&max_cp_field);
if (err != DB_SUCCESS) {
@ -3756,10 +3780,9 @@ reread_log_header:
ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT)
& ~LOG_HEADER_FORMAT_ENCRYPTED));
if (checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) {
log_group_header_read(&log_sys->log, max_cp_field);
checkpoint_lsn_start = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
checkpoint_no_start = mach_read_from_8(buf + LOG_CHECKPOINT_NO);
if (checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) {
goto reread_log_header;
}
@ -3928,6 +3951,10 @@ reread_log_header:
goto fail;
}
if (opt_lock_ddl_per_table) {
mdl_unlock_all();
}
xtrabackup_destroy_datasinks();
msg("xtrabackup: Redo log (from LSN " LSN_PF " to " LSN_PF

View file

@ -193,4 +193,8 @@ xb_get_one_option(int optid,
const char*
xb_get_copy_action(const char *dflt = "Copying");
void mdl_lock_init();
void mdl_lock_table(ulint space_id);
void mdl_unlock_all();
#endif /* XB_XTRABACKUP_H */

View file

@ -3372,4 +3372,11 @@ create table t1 (col1 int default(-(default(col1))));
ERROR 01000: Expression for field `col1` is refering to uninitialized field `col1`
create table t1 (col int default (yearweek((exp(710)))));
ERROR 22003: DOUBLE value is out of range in 'exp(710)'
#
# MDEV-13707 Server in ORACLE mode crashes on ALTER with wrong DEFAULT clause
#
CREATE OR REPLACE TABLE t1(i int);
ALTER TABLE t1 ADD b CHAR(255) DEFAULT `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`;
ERROR 42S22: Unknown column 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' in 'DEFAULT'
DROP TABLE t1;
# end of 10.2 test

View file

@ -10041,3 +10041,141 @@ EXPLAIN
}
}
drop table t1,t2,t3,t4;
#
# MDEV-13709: Optimization for semi-joins of grouping derived tables
# (Splitting derived tables / views with GROUP BY)
#
CREATE TABLE t1 (i int);
INSERT INTO t1 VALUES (1),(9),(3);
CREATE TABLE t2 (a int, i int);
INSERT INTO t2 VALUES (1,9),(2,3),(3,7),(4,1);
CREATE TABLE t3 (a int, c varchar(8), index(c));
INSERT INTO t3 VALUES (1,'foo'),(3,'bar'),(4,'foo'),(2,'bar');
CREATE TABLE t4 (c varchar(8));
INSERT INTO t4 VALUES ('abc'),('foo'),('def');
CREATE VIEW v1 AS
SELECT c FROM t3
WHERE a IN ( SELECT t2.a FROM t1 JOIN t2 WHERE t1.i = t2.i ) GROUP BY c ;
set statement optimizer_switch='split_grouping_derived=off' for SELECT * FROM t4 WHERE c IN ( SELECT c FROM v1 );
c
foo
SELECT * FROM t4 WHERE c IN ( SELECT c FROM v1 );
c
foo
explain extended SELECT * FROM t4 WHERE c IN ( SELECT c FROM v1 );
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t4 ALL NULL NULL NULL NULL 3 100.00 Using where
1 PRIMARY <derived3> ref key0 key0 11 test.t4.c 4 100.00 FirstMatch(t4)
3 LATERAL DERIVED t3 ALL c NULL NULL NULL 4 75.00 Using where
3 LATERAL DERIVED <subquery4> eq_ref distinct_key distinct_key 4 func 1 100.00
4 MATERIALIZED t1 ALL NULL NULL NULL NULL 3 100.00
4 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 /* select#1 */ select `test`.`t4`.`c` AS `c` from `test`.`t4` semi join (`test`.`v1`) where `v1`.`c` = `test`.`t4`.`c`
explain format=json SELECT * FROM t4 WHERE c IN ( SELECT c FROM v1 );
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "t4",
"access_type": "ALL",
"rows": 3,
"filtered": 100,
"attached_condition": "t4.c is not null"
},
"table": {
"table_name": "<derived3>",
"access_type": "ref",
"possible_keys": ["key0"],
"key": "key0",
"key_length": "11",
"used_key_parts": ["c"],
"ref": ["test.t4.c"],
"rows": 4,
"filtered": 100,
"first_match": "t4",
"materialized": {
"query_block": {
"select_id": 3,
"const_condition": "1",
"table": {
"table_name": "t3",
"access_type": "ALL",
"possible_keys": ["c"],
"rows": 4,
"filtered": 75,
"attached_condition": "t3.c = t4.c"
},
"table": {
"table_name": "<subquery4>",
"access_type": "eq_ref",
"possible_keys": ["distinct_key"],
"key": "distinct_key",
"key_length": "4",
"used_key_parts": ["a"],
"ref": ["func"],
"rows": 1,
"filtered": 100,
"materialized": {
"unique": 1,
"query_block": {
"select_id": 4,
"table": {
"table_name": "t1",
"access_type": "ALL",
"rows": 3,
"filtered": 100
},
"block-nl-join": {
"table": {
"table_name": "t2",
"access_type": "ALL",
"rows": 4,
"filtered": 100
},
"buffer_type": "flat",
"buffer_size": "256Kb",
"join_type": "BNL",
"attached_condition": "t2.i = t1.i and t2.i = t1.i"
}
}
}
}
}
}
}
}
}
DROP VIEW v1;
DROP TABLE t1,t2,t3,t4;
#
# MDEV-13710: Optimization for equi-joins of grouping derived tables
# (Splitting derived tables / views with GROUP BY) :
# FROM list of the derived table contains constant tables
#
CREATE TABLE t1 (a int, INDEX(a)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (9),(5),(1);
CREATE TABLE t2 (b int) ENGINE=MyISAM;
CREATE TABLE t3 (c varchar(8), d int) ENGINE=MyISAM;
INSERT INTO t3 VALUES ('foo',2),('bar',6);
CREATE VIEW v1 AS SELECT a FROM t1, t2 GROUP BY a;
SELECT * FROM t3
WHERE d IN ( SELECT * FROM v1 ) AND c LIKE 'z%' OR c IS NULL;
c d
DROP VIEW v1;
DROP TABLE t1,t2,t3;
#
# MDEV-13734: Optimization for equi-joins of grouping derived tables
# (Splitting derived tables / views with GROUP BY) :
# derived table / view is empty
#
CREATE TABLE t1 (a int, b int, INDEX(a)) ENGINE=MyISAM;
CREATE TABLE t2 (c int) ENGINE=MyISAM;
CREATE VIEW v1 AS SELECT a, b FROM t1 STRAIGHT_JOIN t2;
CREATE VIEW v2 AS SELECT a, max(b) as bmax FROM v1 GROUP BY a;
CREATE VIEW v3 AS SELECT v2.* FROM t1 JOIN v2 ON t1.b = v2.bmax ;
SELECT * FROM v3 JOIN t1 ON (bmax = b);
a bmax a b
DROP VIEW v1,v2,v3;
DROP TABLE t1,t2;

View file

@ -669,6 +669,42 @@ JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = 1
select JSON_EXTRACT('{\"input1\":\"\\u00f6\"}', '$.\"input1\"');
JSON_EXTRACT('{\"input1\":\"\\u00f6\"}', '$.\"input1\"')
"\u00f6"
select JSON_EXTRACT('{"foo": "bar" foobar foo invalid ', '$.foo');
JSON_EXTRACT('{"foo": "bar" foobar foo invalid ', '$.foo')
NULL
Warnings:
Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 15
SELECT JSON_OBJECT('foo', '`');
JSON_OBJECT('foo', '`')
{"foo": "`"}
SELECT JSON_OBJECT("foo", "bar`bar");
JSON_OBJECT("foo", "bar`bar")
{"foo": "bar`bar"}
SELECT JSON_SET('{}', '$.age', 87);
JSON_SET('{}', '$.age', 87)
{"age": 87}
SELECT JSON_MERGE('[]', '{"c":"d"}');
JSON_MERGE('[]', '{"c":"d"}')
[{"c": "d"}]
SET @str = "{\"\\u00e4\\u00f6\":\"yes\"}";
SET @path = "$.\"\\u00e4\\u00f6\"";
select @str, @path, JSON_EXTRACT(@str, @path);
@str @path JSON_EXTRACT(@str, @path)
{"\u00e4\u00f6":"yes"} $."\u00e4\u00f6" "yes"
SET @str = "{\"\\u00e4\":\"yes\"}";
SET @path = "$.\"\\u00e4\"";
select @str, @path, JSON_EXTRACT(@str, @path);
@str @path JSON_EXTRACT(@str, @path)
{"\u00e4":"yes"} $."\u00e4" "yes"
select json_array(5,json_query('[1,2]','$'));
json_array(5,json_query('[1,2]','$'))
[5, [1,2]]
SELECT JSON_ARRAY('1. ě 2. š 3. č 4. ř 5. ž 6. ý 7. á 8. í 9. é 10. ů 11. ú') AS json_data;
json_data
["1. ě 2. š 3. č 4. ř 5. ž 6. ý 7. á 8. í 9. é 10. ů 11. ú"]
SELECT JSON_OBJECT("user","Jožko Mrkvičká") as json_data;
json_data
{"user": "Jožko Mrkvičká"}
#
# Start of 10.3 tests
#

View file

@ -6638,6 +6638,29 @@ drop table procViewTable;
use test;
drop database bugTest;
#
# MDEV-13436: PREPARE doesn't work as expected & throws errors but
# MySQL is working fine
#
create table t1 (a int);
insert into t1 values (1),(2);
SET @sql_query = "
CREATE VIEW v1 AS
SELECT * FROM (
SELECT CASE WHEN 1 IN (SELECT a from t1 where a < 2) THEN TRUE END AS testcase
) testalias
";
PREPARE stmt FROM @sql_query;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
show create view v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `testalias`.`testcase` AS `testcase` from (select case when 1 in (select `test`.`t1`.`a` from `test`.`t1` where `test`.`t1`.`a` < 2) then 1 end AS `testcase`) `testalias` latin1 latin1_swedish_ci
SELECT * FROM v1;
testcase
1
drop view v1;
drop table t1;
#
# End of 10.2 tests
#
#

View file

@ -1,13 +1,19 @@
set global innodb_purge_stop_now = 1;
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
connect purge_control,localhost,root;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
create table t1(f1 int not null, f2 blob not null, f3 blob not null,
vchar char(2) as (substr(f3,2,2)) virtual,
primary key(f1, f3(5)), index(vchar))engine=innodb;
insert into t1(f1,f2,f3) values(1, repeat('a',8000), repeat('b', 9000));
update t1 set f1=5 where f1=1;
delete from t1 where f1=5;
set global innodb_purge_run_now=1;
set global innodb_fast_shutdown=0;
set global innodb_purge_stop_now = 1;
connection purge_control;
COMMIT;
InnoDB 0 transactions not purged
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
drop table t1;
create table t1(f1 int not null, f2 blob not null, f3 blob not null,
vchar char(2) as (substr(f3,2,2)) virtual,
@ -15,9 +21,11 @@ primary key(f1, f3(5)), index(vchar, f3(2)))engine=innodb;
insert into t1(f1,f2,f3) values(1, repeat('a',8000), repeat('b', 9000));
update t1 set f1=5 where f1=1;
delete from t1 where f1=5;
set global innodb_purge_run_now=1;
set global innodb_fast_shutdown=0;
set global innodb_purge_stop_now = 1;
connection purge_control;
COMMIT;
InnoDB 0 transactions not purged
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
drop table t1;
create table t1(f1 int not null, f2 blob not null, f3 blob not null,
vchar blob as (f3) virtual,
@ -25,6 +33,10 @@ primary key(f1, f3(5)), index(vchar(3)))engine=innodb;
insert into t1(f1,f2,f3) values(1, repeat('a',8000), repeat('b', 9000));
update t1 set f1=5 where f1=1;
delete from t1 where f1=5;
set global innodb_purge_run_now=1;
set global innodb_fast_shutdown=0;
connection purge_control;
COMMIT;
InnoDB 0 transactions not purged
disconnect purge_control;
connection default;
drop table t1;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;

View file

@ -1,9 +1,12 @@
--source include/have_debug.inc
--source include/have_innodb.inc
# The embedded server does not support restarting.
--source include/not_embedded.inc
set global innodb_purge_stop_now = 1;
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
connect (purge_control,localhost,root);
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
# Index on virtual column
@ -16,10 +19,12 @@ insert into t1(f1,f2,f3) values(1, repeat('a',8000), repeat('b', 9000));
update t1 set f1=5 where f1=1;
delete from t1 where f1=5;
set global innodb_purge_run_now=1;
set global innodb_fast_shutdown=0;
--source include/restart_mysqld.inc
set global innodb_purge_stop_now = 1;
connection purge_control;
COMMIT;
--source ../../innodb/include/wait_all_purged.inc
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
drop table t1;
# Index on virtual column and blob
@ -33,10 +38,11 @@ insert into t1(f1,f2,f3) values(1, repeat('a',8000), repeat('b', 9000));
update t1 set f1=5 where f1=1;
delete from t1 where f1=5;
set global innodb_purge_run_now=1;
set global innodb_fast_shutdown=0;
--source include/restart_mysqld.inc
set global innodb_purge_stop_now = 1;
connection purge_control;
COMMIT;
--source ../../innodb/include/wait_all_purged.inc
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
drop table t1;
# Index on virtual column of blob type
@ -50,7 +56,12 @@ insert into t1(f1,f2,f3) values(1, repeat('a',8000), repeat('b', 9000));
update t1 set f1=5 where f1=1;
delete from t1 where f1=5;
set global innodb_purge_run_now=1;
set global innodb_fast_shutdown=0;
--source include/restart_mysqld.inc
connection purge_control;
COMMIT;
--source ../../innodb/include/wait_all_purged.inc
disconnect purge_control;
connection default;
drop table t1;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
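Note: the removed innodb_purge_stop_now/innodb_purge_run_now debug switches are replaced here by plain MVCC: purge cannot discard delete-marked rows while an open read view might still need them. The distilled pattern, using the same purge_control connection name as the test:
connect (purge_control,localhost,root);
START TRANSACTION WITH CONSISTENT SNAPSHOT;  # read view pins old row versions
connection default;
# ... DML whose delete-marked records must stay visible ...
connection purge_control;
COMMIT;                                      # read view closes; purge may proceed
disconnect purge_control;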

View file

@ -494,7 +494,9 @@ c4 VARCHAR(2048),
INDEX idx1(c2),
INDEX idx2(c3(512)),
INDEX idx3(c4(512))) Engine=InnoDB;
SET GLOBAL INNODB_PURGE_STOP_NOW=ON;
connect purge_control,localhost,root;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
SET GLOBAL innodb_disable_background_merge=ON;
SET GLOBAL innodb_monitor_reset = ibuf_merges;
SET GLOBAL innodb_monitor_reset = ibuf_merges_insert;
@ -659,7 +661,10 @@ FROM information_schema.innodb_metrics
WHERE name = 'ibuf_merges_inserts' AND count > 0;
name
SET GLOBAL innodb_disable_background_merge=OFF;
SET GLOBAL INNODB_PURGE_RUN_NOW=ON;
connection purge_control;
COMMIT;
disconnect purge_control;
connection default;
DROP TABLE test_wl5522.t1;
CREATE TABLE test_wl5522.t1 (
c1 BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY,

View file

@ -1055,7 +1055,9 @@ CREATE TABLE test_wl5522.t1 (
INDEX idx3(c4(512))) Engine=InnoDB;
# Stop purge so that it doesn't remove the delete marked entries.
SET GLOBAL INNODB_PURGE_STOP_NOW=ON;
connect (purge_control,localhost,root);
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
# Disable change buffer merge from the master thread, additionally
# enable aggressive flushing so that more changes are buffered.
@ -1125,7 +1127,10 @@ SELECT name
SET GLOBAL innodb_disable_background_merge=OFF;
# Enable normal operation
SET GLOBAL INNODB_PURGE_RUN_NOW=ON;
connection purge_control;
COMMIT;
disconnect purge_control;
connection default;
DROP TABLE test_wl5522.t1;

View file

@ -56,6 +56,7 @@ INSERT INTO t1 VALUES("left3", ST_GeomFromText('POLYGON (( -3 0, -3 2, -1 2, -1
SET @p = ST_GeomFromText('POLYGON (( 0 0, 0 2, 2 2, 2 0, 0 0))');
SELECT name, ST_AsText(square) from t1 where MBRContains(@p, square);
name ST_AsText(square)
small POLYGON((0 0,0 1,1 1,1 0,0 0))
SELECT name, ST_AsText(square) from t1 where MBRDisjoint(@p, square);
name ST_AsText(square)
up3 POLYGON((0 3,0 5,2 5,2 3,0 3))
@ -90,6 +91,7 @@ down2 POLYGON((0 -2,0 0,2 0,2 -2,0 -2))
left2 POLYGON((-2 0,-2 2,0 2,0 0,-2 0))
SELECT name, ST_AsText(square) from t1 where MBRWithin(@p, square);
name ST_AsText(square)
big POLYGON((0 0,0 3,3 3,3 0,0 0))
SET @vert1 = ST_GeomFromText('POLYGON ((0 -2, 0 2, 0 -2))');
SET @horiz1 = ST_GeomFromText('POLYGON ((-2 0, 2 0, -2 0))');
SET @horiz2 = ST_GeomFromText('POLYGON ((-1 0, 3 0, -1 0))');
@ -217,7 +219,7 @@ SELECT COUNT(*)
FROM t1
WHERE ST_CONTAINS(ST_GeomFromText('POLYGON((2 2,4 2, 4 4, 2 4, 2 2))'),way);
COUNT(*)
0
9
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
@ -226,7 +228,7 @@ SELECT COUNT(*)
FROM t1
WHERE ST_CONTAINS(ST_GeomFromText('POLYGON((2 2,4 2, 4 4, 2 4, 2 2))'),way);
COUNT(*)
0
9
DROP TABLE t1;
CREATE TABLE t1( i INT, g GEOMETRY NOT NULL, SPATIAL INDEX (g)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1, LINESTRING(POINT(1,1), POINT(4, 4)));

View file

@ -123,7 +123,9 @@ INDEX idx1(c2),
INDEX idx2(c3(512)),
INDEX idx3(c4(512))) Engine=InnoDB
ROW_FORMAT=COMPRESSED;
SET GLOBAL INNODB_PURGE_STOP_NOW=ON;
connect purge_control,localhost,root;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
SET GLOBAL innodb_disable_background_merge=ON;
SET GLOBAL innodb_monitor_reset = ibuf_merges;
SET GLOBAL innodb_monitor_reset = ibuf_merges_insert;
@ -288,7 +290,10 @@ FROM information_schema.innodb_metrics
WHERE name = 'ibuf_merges_inserts' AND count > 0;
name
SET GLOBAL innodb_disable_background_merge=OFF;
SET GLOBAL INNODB_PURGE_RUN_NOW=ON;
connection purge_control;
COMMIT;
disconnect purge_control;
connection default;
DROP TABLE test_wl5522.t1;
CREATE TABLE test_wl5522.t1 (
c1 BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY,

View file

@ -321,7 +321,9 @@ CREATE TABLE test_wl5522.t1 (
ROW_FORMAT=COMPRESSED;
# Stop purge so that it doesn't remove the delete marked entries.
SET GLOBAL INNODB_PURGE_STOP_NOW=ON;
connect (purge_control,localhost,root);
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
# Disable change buffer merge from the master thread, additionally
# enable aggressive flushing so that more changes are buffered.
@ -391,7 +393,10 @@ SELECT name
SET GLOBAL innodb_disable_background_merge=OFF;
# Enable normal operation
SET GLOBAL INNODB_PURGE_RUN_NOW=ON;
connection purge_control;
COMMIT;
disconnect purge_control;
connection default;
DROP TABLE test_wl5522.t1;

View file

@ -2109,10 +2109,9 @@ ERROR 42000: Incorrect parameter count in the call to native function 'json_set'
error ER_INVALID_JSON_TEXT_IN_PARAM
SELECT JSON_SET('{}', '$.name', JSON_EXTRACT('', '$'));
JSON_SET('{}', '$.name', JSON_EXTRACT('', '$'))
NULL
{"name": null}
Warnings:
Warning 4037 Unexpected end of JSON text in argument 1 to function 'json_extract'
Warning 4038 Syntax error in JSON text in argument 1 to function 'json_set' at position 2
select json_set('[1,2,3]', '$[2]', 4);
json_set('[1,2,3]', '$[2]', 4)
[1, 2, 4]

View file

@ -0,0 +1,4 @@
CREATE TABLE t(i INT) ENGINE INNODB;
INSERT INTO t VALUES(1);
# xtrabackup backup
DROP TABLE t;

View file

@ -0,0 +1,12 @@
--source include/have_debug.inc
CREATE TABLE t(i INT) ENGINE INNODB;
INSERT INTO t VALUES(1);
echo # xtrabackup backup;
let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
--disable_result_log
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --lock-ddl-per-table=1 --dbug=+d,check_mdl_lock_works;
--enable_result_log
DROP TABLE t;
rmdir $targetdir;

View file

@ -1 +1 @@
--innodb --loose-changed_page_bitmaps
--innodb --loose-changed_page_bitmaps --innodb-sys-tables
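Note: the added --innodb-sys-tables option is presumably needed because mdl_lock_table() resolves tablespace IDs via INFORMATION_SCHEMA.INNODB_SYS_TABLES, which must be enabled in the server under test. The lookup the backup performs, shown with an illustrative space id:
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES
WHERE SPACE = 5 AND NAME LIKE '%/%';  # e.g. test/t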

View file

@ -1,27 +0,0 @@
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
name count
purge_stop_count 0
purge_resume_count 0
SET @orig = @@global.innodb_purge_run_now;
SELECT @orig;
@orig
0
SET GLOBAL innodb_purge_stop_now = ON;
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
name count
purge_stop_count 1
purge_resume_count 0
SET GLOBAL innodb_purge_run_now = ON;
SELECT @@global.innodb_purge_run_now;
@@global.innodb_purge_run_now
0
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
name count
purge_stop_count 1
purge_resume_count 1

View file

@ -1,27 +0,0 @@
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
name count
purge_stop_count 0
purge_resume_count 0
SET @orig = @@global.innodb_purge_run_now;
SELECT @orig;
@orig
0
SET GLOBAL innodb_purge_stop_now = ON;
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
name count
purge_stop_count 1
purge_resume_count 0
SET GLOBAL innodb_purge_run_now = ON;
SELECT @@global.innodb_purge_run_now;
@@global.innodb_purge_run_now
0
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
name count
purge_stop_count 1
purge_resume_count 1

View file

@ -1812,34 +1812,6 @@ NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_PURGE_RUN_NOW
SESSION_VALUE NULL
GLOBAL_VALUE OFF
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE OFF
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Set purge state to RUN
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_PURGE_STOP_NOW
SESSION_VALUE NULL
GLOBAL_VALUE OFF
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE OFF
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Set purge state to STOP
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_PURGE_THREADS
SESSION_VALUE NULL
GLOBAL_VALUE 4

View file

@ -1,53 +0,0 @@
#
# Basic test for innodb_purge_run_now, note it is a duplicate of
# innodb_purge_stop_now.
#
-- source include/have_innodb.inc
# The config variable is a debug variable for now
-- source include/have_debug.inc
--disable_query_log
# Enable metrics for the counters we are going to use
set global innodb_monitor_enable = purge_stop_count;
set global innodb_monitor_enable = purge_resume_count;
--enable_query_log
# Should be 0 for both
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
# Check the default value
SET @orig = @@global.innodb_purge_run_now;
SELECT @orig;
# Stop of purge
SET GLOBAL innodb_purge_stop_now = ON;
# Stop count should now be 1
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
SET GLOBAL innodb_purge_run_now = ON;
# Should always be OFF
SELECT @@global.innodb_purge_run_now;
# Both should be 1 now
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
--disable_query_log
set global innodb_monitor_disable = all;
set global innodb_monitor_reset_all = all;
-- disable_warnings
set global innodb_monitor_enable = default;
set global innodb_monitor_disable = default;
set global innodb_monitor_reset = default;
set global innodb_monitor_reset_all = default;
-- enable_warnings

View file

@ -1,53 +0,0 @@
#
# Basic test for innodb_purge_run_now, note it is a duplicate of
# innodb_purge_stop_now.
#
-- source include/have_innodb.inc
# The config variable is a debug variable for now
-- source include/have_debug.inc
--disable_query_log
# Enable metrics for the counters we are going to use
set global innodb_monitor_enable = purge_stop_count;
set global innodb_monitor_enable = purge_resume_count;
--enable_query_log
# Should be 0 for both
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
# Check the default value
SET @orig = @@global.innodb_purge_run_now;
SELECT @orig;
# Stop of purge
SET GLOBAL innodb_purge_stop_now = ON;
# Stop count should now be 1
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
SET GLOBAL innodb_purge_run_now = ON;
# Should always be OFF
SELECT @@global.innodb_purge_run_now;
# Both should be 1 now
SELECT name, count
FROM information_schema.innodb_metrics
WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
--disable_query_log
set global innodb_monitor_disable = all;
set global innodb_monitor_reset_all = all;
-- disable_warnings
set global innodb_monitor_enable = default;
set global innodb_monitor_disable = default;
set global innodb_monitor_reset = default;
set global innodb_monitor_reset_all = default;
-- enable_warnings

View file

@ -2093,4 +2093,14 @@ create table t1 (col1 int default(-(default(col1))));
--error ER_DATA_OUT_OF_RANGE
create table t1 (col int default (yearweek((exp(710)))));
--echo #
--echo # MDEV-13707 Server in ORACLE mode crashes on ALTER with wrong DEFAULT clause
--echo #
CREATE OR REPLACE TABLE t1(i int);
--error ER_BAD_FIELD_ERROR
ALTER TABLE t1 ADD b CHAR(255) DEFAULT `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`;
DROP TABLE t1;
--echo # end of 10.2 test

View file

@ -1762,3 +1762,75 @@ eval explain extended $q6;
eval explain format=json $q6;
drop table t1,t2,t3,t4;
--echo #
--echo # MDEV-13709: Optimization for semi-joins of grouping derived tables
--echo # (Splitting derived tables / views with GROUP BY)
--echo #
CREATE TABLE t1 (i int);
INSERT INTO t1 VALUES (1),(9),(3);
CREATE TABLE t2 (a int, i int);
INSERT INTO t2 VALUES (1,9),(2,3),(3,7),(4,1);
CREATE TABLE t3 (a int, c varchar(8), index(c));
INSERT INTO t3 VALUES (1,'foo'),(3,'bar'),(4,'foo'),(2,'bar');
CREATE TABLE t4 (c varchar(8));
INSERT INTO t4 VALUES ('abc'),('foo'),('def');
CREATE VIEW v1 AS
SELECT c FROM t3
WHERE a IN ( SELECT t2.a FROM t1 JOIN t2 WHERE t1.i = t2.i ) GROUP BY c ;
let $q1=
SELECT * FROM t4 WHERE c IN ( SELECT c FROM v1 );
eval $no_splitting $q1;
eval $q1;
eval explain extended $q1;
eval explain format=json $q1;
DROP VIEW v1;
DROP TABLE t1,t2,t3,t4;
--echo #
--echo # MDEV-13710: Optimization for equi-joins of grouping derived tables
--echo # (Splitting derived tables / views with GROUP BY) :
--echo # FROM list of the derived table contains constant tables
--echo #
CREATE TABLE t1 (a int, INDEX(a)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (9),(5),(1);
CREATE TABLE t2 (b int) ENGINE=MyISAM;
CREATE TABLE t3 (c varchar(8), d int) ENGINE=MyISAM;
INSERT INTO t3 VALUES ('foo',2),('bar',6);
CREATE VIEW v1 AS SELECT a FROM t1, t2 GROUP BY a;
SELECT * FROM t3
WHERE d IN ( SELECT * FROM v1 ) AND c LIKE 'z%' OR c IS NULL;
DROP VIEW v1;
DROP TABLE t1,t2,t3;
--echo #
--echo # MDEV-13734: Optimization for equi-joins of grouping derived tables
--echo # (Splitting derived tables / views with GROUP BY) :
--echo # derived table / view is empty
--echo #
CREATE TABLE t1 (a int, b int, INDEX(a)) ENGINE=MyISAM;
CREATE TABLE t2 (c int) ENGINE=MyISAM;
CREATE VIEW v1 AS SELECT a, b FROM t1 STRAIGHT_JOIN t2;
CREATE VIEW v2 AS SELECT a, max(b) as bmax FROM v1 GROUP BY a;
CREATE VIEW v3 AS SELECT v2.* FROM t1 JOIN v2 ON t1.b = v2.bmax ;
SELECT * FROM v3 JOIN t1 ON (bmax = b);
DROP VIEW v1,v2,v3;
DROP TABLE t1,t2;

View file

@ -317,6 +317,50 @@ select JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = false;
select JSON_EXTRACT('{\"asdf\":true}', "$.\"asdf\"") = 1;
select JSON_EXTRACT('{\"input1\":\"\\u00f6\"}', '$.\"input1\"');
#
# MDEV-129892 JSON_EXTRACT returns data for invalid JSON
#
select JSON_EXTRACT('{"foo": "bar" foobar foo invalid ', '$.foo');
#
# MDEV-13138 JSON_OBJECT returns null with strings containing backticks.
#
SELECT JSON_OBJECT('foo', '`');
SELECT JSON_OBJECT("foo", "bar`bar");
#
# MDEV-13324 JSON_SET returns NULL instead of object.
#
SELECT JSON_SET('{}', '$.age', 87);
#
# MDEV-13104 Json functions.
#
SELECT JSON_MERGE('[]', '{"c":"d"}');
#
# MDEV-12774 JSON_EXTRACT fails with some escaped unicode as key.
#
SET @str = "{\"\\u00e4\\u00f6\":\"yes\"}";
SET @path = "$.\"\\u00e4\\u00f6\"";
select @str, @path, JSON_EXTRACT(@str, @path);
SET @str = "{\"\\u00e4\":\"yes\"}";
SET @path = "$.\"\\u00e4\"";
select @str, @path, JSON_EXTRACT(@str, @path);
#
# MDEV-12877 Wrong result from JSON native function.
#
select json_array(5,json_query('[1,2]','$'));
#
# MDEV-13633 JSON_ARRAY() - bad output with some UTF8 characters.
#
SELECT JSON_ARRAY('1. ě 2. š 3. č 4. ř 5. ž 6. ý 7. á 8. í 9. é 10. ů 11. ú') AS json_data;
SELECT JSON_OBJECT("user","Jožko Mrkvičká") as json_data;
--echo #
--echo # Start of 10.3 tests
--echo #

View file

@ -6350,6 +6350,27 @@ drop table procViewTable;
use test;
drop database bugTest;
--echo #
--echo # MDEV-13436: PREPARE doesn't work as expected & throws errors but
--echo # MySQL is working fine
--echo #
create table t1 (a int);
insert into t1 values (1),(2);
SET @sql_query = "
CREATE VIEW v1 AS
SELECT * FROM (
SELECT CASE WHEN 1 IN (SELECT a from t1 where a < 2) THEN TRUE END AS testcase
) testalias
";
PREPARE stmt FROM @sql_query;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
show create view v1;
SELECT * FROM v1;
drop view v1;
drop table t1;
--echo #
--echo # End of 10.2 tests
--echo #

View file

@ -1489,9 +1489,9 @@ static inline
void mark_unsupported_func(const char *where, const char *processor_name)
{
char buff[64];
sprintf(buff, "%s::%s", where ? where: "", processor_name);
my_snprintf(buff, sizeof(buff), "%s::%s", where ? where: "", processor_name);
DBUG_ENTER(buff);
sprintf(buff, "%s returns TRUE: unsupported function", processor_name);
my_snprintf(buff, sizeof(buff), "%s returns TRUE: unsupported function", processor_name);
DBUG_PRINT("info", ("%s", buff));
DBUG_VOID_RETURN;
}

View file

@ -550,7 +550,18 @@ public:
maybe_null= true;
}
enum Functype functype() const { return spatial_rel; }
enum Functype rev_functype() const { return spatial_rel; }
enum Functype rev_functype() const
{
switch (spatial_rel)
{
case SP_CONTAINS_FUNC:
return SP_WITHIN_FUNC;
case SP_WITHIN_FUNC:
return SP_CONTAINS_FUNC;
default:
return spatial_rel;
}
}
bool is_null() { (void) val_int(); return null_value; }
void add_key_fields(JOIN *join, KEY_FIELD **key_fields,
uint *and_level, table_map usable_tables,
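Note: returning the converse relation from rev_functype() tells the optimizer that MBRContains and MBRWithin may be evaluated with their arguments swapped, e.g. when only the second argument is covered by a SPATIAL index; this is what restores the missing rows in the gis result changes above. A hedged SQL illustration (geometries chosen arbitrarily):
SET @p = ST_GeomFromText('POLYGON((0 0,0 3,3 3,3 0,0 0))');
SET @q = ST_GeomFromText('POLYGON((1 1,1 2,2 2,2 1,1 1))');
SELECT MBRContains(@p, @q), MBRWithin(@q, @p);  # both 1: the relations are converses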

View file

@ -650,6 +650,7 @@ static int alloc_tmp_paths(THD *thd, uint n_paths,
if (*tmp_paths == 0)
{
MEM_ROOT *root= thd->stmt_arena->mem_root;
*paths= (json_path_with_flags *) alloc_root(root,
sizeof(json_path_with_flags) * n_paths);
*tmp_paths= (String *) alloc_root(root, sizeof(String) * n_paths);
@ -657,6 +658,8 @@ static int alloc_tmp_paths(THD *thd, uint n_paths,
return 1;
bzero(*tmp_paths, sizeof(String) * n_paths);
for (uint c_path=0; c_path < n_paths; c_path++)
(*tmp_paths)[c_path].set_charset(&my_charset_utf8_general_ci);
}
return 0;
@ -821,7 +824,11 @@ String *Item_func_json_extract::read_json(String *str,
not_first_value= 1;
if (!possible_multiple_values)
{
/* Loop to the end of the JSON just to make sure it's valid. */
while (json_get_path_next(&je, &p) == 0) {}
break;
}
}
if (je.s.error)
@ -869,7 +876,7 @@ longlong Item_func_json_extract::val_int()
json_value_types type;
char *value;
int value_len;
longlong i;
longlong i= 0;
if (read_json(NULL, &type, &value, &value_len) != NULL)
{
@ -1467,6 +1474,7 @@ String *Item_func_json_array::val_str(String *str)
uint n_arg;
str->length(0);
str->set_charset(collation.collation);
if (str->append("[", 1) ||
((arg_count > 0) && append_json_value(str, args[0], &tmp_val)))
@ -1791,6 +1799,7 @@ String *Item_func_json_object::val_str(String *str)
uint n_arg;
str->length(0);
str->set_charset(collation.collation);
if (str->append("{", 1) ||
(arg_count > 0 &&
@ -1968,14 +1977,25 @@ continue_j2:
else
{
const uchar *end1, *beg1, *end2, *beg2;
int empty_array= 0;
beg1= je1->value_begin;
/* Merge as a single array. */
if (je1->value_type == JSON_VALUE_ARRAY)
{
if (json_skip_level(je1))
int cur_level= je1->stack_p;
empty_array= 1;
while (json_scan_next(je1) == 0)
{
if (je1->stack_p < cur_level)
break;
empty_array= 0;
}
if (je1->s.error)
return 1;
end1= je1->s.c_str - je1->sav_c_len;
}
else
@ -1992,8 +2012,8 @@ continue_j2:
end1= je1->value_end;
}
if (str->append((const char*) beg1, end1 - beg1),
str->append(", ", 2))
if (str->append((const char*) beg1, end1 - beg1) ||
(!empty_array && str->append(", ", 2)))
return 3;
if (json_value_scalar(je2))
@ -2449,6 +2469,8 @@ String *Item_func_json_insert::val_str(String *str)
}
else /*JSON_PATH_KEY*/
{
uint n_key= 0;
if (je.value_type != JSON_VALUE_OBJECT)
continue;
@ -2460,6 +2482,7 @@ String *Item_func_json_insert::val_str(String *str)
json_string_set_str(&key_name, lp->key, lp->key_end);
if (json_key_matches(&je, &key_name))
goto v_found;
n_key++;
if (json_skip_key(&je))
goto js_error;
break;
@ -2477,7 +2500,8 @@ String *Item_func_json_insert::val_str(String *str)
v_to= (const char *) (je.s.c_str - je.sav_c_len);
str->length(0);
if (append_simple(str, js->ptr(), v_to - js->ptr()) ||
str->append(", \"", 3) ||
(n_key > 0 && str->append(", ", 2)) ||
str->append("\"", 1) ||
append_simple(str, lp->key, lp->key_end - lp->key) ||
str->append("\":", 2) ||
append_json_value(str, args[n_arg+1], &tmp_val) ||
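Note: with the new n_key counter, JSON_INSERT/JSON_SET emit the ", " separator only when the target object already holds at least one key, so inserting into {} now produces valid JSON (the MDEV-13324 test earlier in this commit covers exactly this):
SELECT JSON_SET('{}', '$.age', 87);    # {"age": 87}
SELECT JSON_SET('{"a":1}', '$.b', 2);  # {"a": 1, "b": 2}, separator still emitted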

View file

@ -99,6 +99,7 @@ class Item_func_json_query: public Item_func_json_value
public:
Item_func_json_query(THD *thd, Item *js, Item *i_path):
Item_func_json_value(thd, js, i_path) {}
bool is_json_type() { return true; }
const char *func_name() const { return "json_query"; }
bool check_and_get_value(json_engine_t *je, String *res, int *error);
Item *get_copy(THD *thd, MEM_ROOT *mem_root)
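Note: declaring is_json_type() true for Item_func_json_query means an enclosing JSON function splices its result in as a JSON value instead of re-quoting it as a string, matching the MDEV-12877 test earlier in this commit:
SELECT JSON_ARRAY(5, JSON_QUERY('[1,2]', '$'));  # [5, [1,2]], not [5, "[1,2]"]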

View file

@ -891,7 +891,7 @@ bool mysql_derived_optimize(THD *thd, LEX *lex, TABLE_LIST *derived)
JOIN *join= first_select->join;
unit->set_limit(unit->global_parameters());
if (join &&
join->optimization_state == JOIN::OPTIMIZATION_IN_STAGE_2 &&
join->optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE &&
join->with_two_phase_optimization)
{
if (unit->optimized_2)

View file

@ -4608,7 +4608,8 @@ void SELECT_LEX::mark_const_derived(bool empty)
{
if (!empty)
increase_derived_records(1);
if (!master_unit()->is_unit_op() && !derived->is_merged_derived())
if (!master_unit()->is_unit_op() && !derived->is_merged_derived() &&
!(join && join->with_two_phase_optimization))
derived->fill_me= TRUE;
}
}

View file

@ -2085,11 +2085,11 @@ static bool mysql_test_create_view(Prepared_statement *stmt)
if (thd->open_temporary_tables(tables))
goto err;
lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW;
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
DT_PREPARE))
goto err;
lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW;
res= select_like_stmt_test(stmt, 0, 0);
err:

View file

@ -1111,7 +1111,7 @@ int JOIN::optimize()
{
int res= 0;
join_optimization_state init_state= optimization_state;
if (optimization_state == JOIN::OPTIMIZATION_IN_STAGE_2)
if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
res= optimize_stage2();
else
{
@ -1123,7 +1123,7 @@ int JOIN::optimize()
res= optimize_inner();
}
if (!with_two_phase_optimization ||
init_state == JOIN::OPTIMIZATION_IN_STAGE_2)
init_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
{
if (!res && have_query_plan != QEP_DELETED)
build_explain();
@ -1341,6 +1341,11 @@ JOIN::optimize_inner()
*/
if (tbl->is_materialized_derived())
{
JOIN *join= tbl->get_unit()->first_select()->join;
if (join &&
join->optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE &&
join->with_two_phase_optimization)
continue;
/*
Do not push conditions from where into materialized inner tables
of outer joins: this is not valid.
@ -1535,7 +1540,7 @@ JOIN::optimize_inner()
setup_subq_exit:
with_two_phase_optimization= check_two_phase_optimization(thd);
if (with_two_phase_optimization)
optimization_state= JOIN::OPTIMIZATION_IN_STAGE_2;
optimization_state= JOIN::OPTIMIZATION_PHASE_1_DONE;
else
{
if (optimize_stage2())
@ -1556,7 +1561,7 @@ int JOIN::optimize_stage2()
goto setup_subq_exit;
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
DBUG_RETURN(1);
DBUG_RETURN(1);
if (thd->check_killed())
DBUG_RETURN(1);
@ -9070,12 +9075,10 @@ bool JOIN::push_splitting_cond_into_derived(THD *thd, Item *cond)
{
enum_reopt_result reopt_result= REOPT_NONE;
table_map all_table_map= 0;
for (JOIN_TAB *tab= join_tab + const_tables;
for (JOIN_TAB *tab= join_tab;
tab < join_tab + top_join_tab_count; tab++)
{
all_table_map|= tab->table->map;
}
reopt_result= reoptimize(cond, all_table_map, NULL);
reopt_result= reoptimize(cond, all_table_map & ~const_table_map, NULL);
if (reopt_result == REOPT_ERROR)
return true;
if (inject_cond_into_where(cond))

View file

@ -1383,7 +1383,7 @@ public:
enum join_optimization_state { NOT_OPTIMIZED=0,
OPTIMIZATION_IN_PROGRESS=1,
OPTIMIZATION_IN_STAGE_2=2,
OPTIMIZATION_PHASE_1_DONE=2,
OPTIMIZATION_DONE=3};
// state of JOIN optimization
enum join_optimization_state optimization_state;

View file

@ -7944,9 +7944,8 @@ int TABLE_LIST::fetch_number_of_rows()
if (jtbm_subselect)
return 0;
if (is_materialized_derived() && !fill_me)
{
table->file->stats.records= ((select_unit*)derived->result)->records;
table->file->stats.records= ((select_unit*)(get_unit()->result))->records;
set_if_bigger(table->file->stats.records, 2);
table->used_stat_records= table->file->stats.records;
}

View file

@ -2697,14 +2697,13 @@ buf_LRU_print_instance(
const byte* frame;
case BUF_BLOCK_FILE_PAGE:
frame = buf_block_get_frame((buf_block_t*) bpage);
fprintf(stderr, "\ntype " ULINTPF
" index id " IB_ID_FMT "\n",
fprintf(stderr, "\ntype %u index id " IB_ID_FMT "\n",
fil_page_get_type(frame),
btr_page_get_index_id(frame));
break;
case BUF_BLOCK_ZIP_PAGE:
frame = bpage->zip.data;
fprintf(stderr, "\ntype " ULINTPF " size " ULINTPF
fprintf(stderr, "\ntype %u size " ULINTPF
" index id " IB_ID_FMT "\n",
fil_page_get_type(frame),
bpage->size.physical(),

View file

@ -19193,8 +19193,6 @@ innobase_fts_find_ranking(FT_INFO* fts_hdl, uchar*, uint)
#ifdef UNIV_DEBUG
static my_bool innodb_background_drop_list_empty = TRUE;
static my_bool innodb_purge_run_now = TRUE;
static my_bool innodb_purge_stop_now = TRUE;
static my_bool innodb_log_checkpoint_now = TRUE;
static my_bool innodb_buf_flush_list_now = TRUE;
static uint innodb_merge_threshold_set_all_debug
@ -19218,52 +19216,6 @@ wait_background_drop_list_empty(
row_wait_for_background_drop_list_empty();
}
/****************************************************************//**
Set the purge state to RUN. If purge is disabled then it
is a no-op. This function is registered as a callback with MySQL. */
static
void
purge_run_now_set(
/*==============*/
THD* thd /*!< in: thread handle */
MY_ATTRIBUTE((unused)),
struct st_mysql_sys_var* var /*!< in: pointer to system
variable */
MY_ATTRIBUTE((unused)),
void* var_ptr /*!< out: where the formal
string goes */
MY_ATTRIBUTE((unused)),
const void* save) /*!< in: immediate result from
check function */
{
if (*(my_bool*) save && trx_purge_state() != PURGE_STATE_DISABLED) {
trx_purge_run();
}
}
/****************************************************************//**
Set the purge state to STOP. If purge is disabled then it
is a no-op. This function is registered as a callback with MySQL. */
static
void
purge_stop_now_set(
/*===============*/
THD* thd /*!< in: thread handle */
MY_ATTRIBUTE((unused)),
struct st_mysql_sys_var* var /*!< in: pointer to system
variable */
MY_ATTRIBUTE((unused)),
void* var_ptr /*!< out: where the formal
string goes */
MY_ATTRIBUTE((unused)),
const void* save) /*!< in: immediate result from
check function */
{
if (*(my_bool*) save && trx_purge_state() != PURGE_STATE_DISABLED) {
trx_purge_stop();
}
}
/****************************************************************//**
Force innodb to checkpoint. */
static
@ -20029,16 +19981,6 @@ static MYSQL_SYSVAR_BOOL(background_drop_list_empty,
"Wait for the background drop list to become empty",
NULL, wait_background_drop_list_empty, FALSE);
static MYSQL_SYSVAR_BOOL(purge_run_now, innodb_purge_run_now,
PLUGIN_VAR_OPCMDARG,
"Set purge state to RUN",
NULL, purge_run_now_set, FALSE);
static MYSQL_SYSVAR_BOOL(purge_stop_now, innodb_purge_stop_now,
PLUGIN_VAR_OPCMDARG,
"Set purge state to STOP",
NULL, purge_stop_now_set, FALSE);
static MYSQL_SYSVAR_BOOL(log_checkpoint_now, innodb_log_checkpoint_now,
PLUGIN_VAR_OPCMDARG,
"Force checkpoint now",
@ -21248,8 +21190,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(purge_batch_size),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(background_drop_list_empty),
MYSQL_SYSVAR(purge_run_now),
MYSQL_SYSVAR(purge_stop_now),
MYSQL_SYSVAR(log_checkpoint_now),
MYSQL_SYSVAR(buf_flush_list_now),
MYSQL_SYSVAR(merge_threshold_set_all_debug),

View file

@ -1321,9 +1321,8 @@ fil_page_reset_type(
@param[in] page file page
@return page type */
inline
ulint
fil_page_get_type(
const byte* page)
uint16_t
fil_page_get_type(const byte* page)
{
return(mach_read_from_2(page + FIL_PAGE_TYPE));
}

View file

@ -32,7 +32,11 @@ MDEV-11782: Rewritten for MariaDB 10.2 by Marko Mäkelä, MariaDB Corporation.
/** innodb_encrypt_log: whether to encrypt the redo log */
extern my_bool srv_encrypt_log;
/** Initialize the redo log encryption key.
/** Initialize the redo log encryption key and random parameters
when creating a new redo log.
The random parameters will be persisted in the log checkpoint pages.
@see log_crypt_write_checkpoint_buf()
@see log_crypt_read_checkpoint_buf()
@return whether the operation succeeded */
UNIV_INTERN
bool
@ -71,10 +75,11 @@ log_crypt_read_checkpoint_buf(const byte* buf);
/** Encrypt or decrypt log blocks.
@param[in,out] buf log blocks to encrypt or decrypt
@param[in] lsn log sequence number of the start of the buffer
@param[in] size size of the buffer, in bytes
@param[in] decrypt whether to decrypt instead of encrypting */
UNIV_INTERN
void
log_crypt(byte* buf, ulint size, bool decrypt = false);
log_crypt(byte* buf, lsn_t lsn, ulint size, bool decrypt = false);
#endif // log0crypt.h

View file

@ -1,6 +1,6 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2016, MariaDB Corporation
Copyright (c) 2013, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -264,7 +264,7 @@ page_set_ssn_id(
/*************************************************************//**
Reads the given header field. */
UNIV_INLINE
ulint
uint16_t
page_header_get_field(
/*==================*/
const page_t* page, /*!< in: page */

View file

@ -170,7 +170,7 @@ page_set_ssn_id(
/*************************************************************//**
Reads the given header field. */
UNIV_INLINE
ulint
uint16_t
page_header_get_field(
/*==================*/
const page_t* page, /*!< in: page */
@ -731,7 +731,7 @@ page_dir_set_n_heap(
ulint n_heap) /*!< in: number of records */
{
ut_ad(n_heap < 0x8000);
ut_ad(!page_zip || n_heap
ut_ad(!page_zip || uint16_t(n_heap)
== (page_header_get_field(page, PAGE_N_HEAP) & 0x7fff) + 1);
page_header_set_field(page, page_zip, PAGE_N_HEAP, n_heap

View file

@ -777,19 +777,42 @@ rec_copy(
const rec_t* rec,
const ulint* offsets);
/**********************************************************//**
Determines the size of a data tuple prefix in a temporary file.
@return total size */
/** Determine the size of a data tuple prefix in a temporary file.
@param[in] index clustered or secondary index
@param[in] fields data fields
@param[in] n_fields number of data fields
@param[out] extra record header size
@return total size, in bytes */
ulint
rec_get_converted_size_temp(
/*========================*/
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
const dtuple_t* v_entry,/*!< in: dtuple contains virtual column
data */
ulint* extra) /*!< out: extra size */
MY_ATTRIBUTE((warn_unused_result));
const dict_index_t* index,
const dfield_t* fields,
ulint n_fields,
ulint* extra)
MY_ATTRIBUTE((warn_unused_result, nonnull(1,2)));
/** Determine the converted size of virtual column data in a temporary file.
@see rec_convert_dtuple_to_temp_v()
@param[in] index clustered index
@param[in] v clustered index record augmented with the values
of virtual columns
@return size in bytes */
ulint
rec_get_converted_size_temp_v(const dict_index_t* index, const dtuple_t* v)
MY_ATTRIBUTE((warn_unused_result, nonnull));
/** Write indexed virtual column data into a temporary file.
@see rec_get_converted_size_temp_v()
@param[out] rec serialized record
@param[in] index clustered index
@param[in] v_entry clustered index record augmented with the values
of virtual columns */
void
rec_convert_dtuple_to_temp_v(
byte* rec,
const dict_index_t* index,
const dtuple_t* v_entry)
MY_ATTRIBUTE((nonnull));
/******************************************************//**
Determine the offset to each field in temporary file.
@ -812,10 +835,7 @@ rec_convert_dtuple_to_temp(
rec_t* rec, /*!< out: record */
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields, /*!< in: number of fields */
const dtuple_t* v_entry); /*!< in: dtuple contains
virtual column data */
ulint n_fields); /*!< in: number of fields */
/**************************************************************//**
Copies the first n fields of a physical record to a new physical record in

View file

@ -104,11 +104,12 @@ get_crypt_info(ulint checkpoint_no)
/** Encrypt or decrypt log blocks.
@param[in,out] buf log blocks to encrypt or decrypt
@param[in] lsn log sequence number of the start of the buffer
@param[in] size size of the buffer, in bytes
@param[in] decrypt whether to decrypt instead of encrypting */
UNIV_INTERN
void
log_crypt(byte* buf, ulint size, bool decrypt)
log_crypt(byte* buf, lsn_t lsn, ulint size, bool decrypt)
{
ut_ad(size % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_a(info.key_version);
@ -118,12 +119,12 @@ log_crypt(byte* buf, ulint size, bool decrypt)
compile_time_assert(sizeof(uint32_t) == 4);
#define LOG_CRYPT_HDR_SIZE 4
lsn &= ~lsn_t(OS_FILE_LOG_BLOCK_SIZE - 1);
for (const byte* const end = buf + size; buf != end;
buf += OS_FILE_LOG_BLOCK_SIZE) {
buf += OS_FILE_LOG_BLOCK_SIZE, lsn += OS_FILE_LOG_BLOCK_SIZE) {
uint32_t dst[(OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE)
/ sizeof(uint32_t)];
const ulint log_block_no = log_block_get_hdr_no(buf);
/* The log block number is not encrypted. */
*aes_ctr_iv =
@ -138,10 +139,10 @@ log_crypt(byte* buf, ulint size, bool decrypt)
# error "LOG_BLOCK_HDR_NO has been moved; redo log format affected!"
#endif
aes_ctr_iv[1] = info.crypt_nonce.word;
mach_write_to_8(reinterpret_cast<byte*>(aes_ctr_iv + 2),
log_block_get_start_lsn(
decrypt ? srv_start_lsn : log_sys->lsn,
log_block_no));
mach_write_to_8(reinterpret_cast<byte*>(aes_ctr_iv + 2), lsn);
ut_ad(log_block_get_start_lsn(lsn,
log_block_get_hdr_no(buf))
== lsn);
int rc = encryption_crypt(
buf + LOG_CRYPT_HDR_SIZE, sizeof dst,
@ -207,7 +208,11 @@ init_crypt_key(crypt_info_t* info, bool upgrade = false)
return true;
}
/** Initialize the redo log encryption key.
/** Initialize the redo log encryption key and random parameters
when creating a new redo log.
The random parameters will be persisted in the log checkpoint pages.
@see log_crypt_write_checkpoint_buf()
@see log_crypt_read_checkpoint_buf()
@return whether the operation succeeded */
UNIV_INTERN
bool

View file

@ -997,10 +997,6 @@ loop:
|| log_block_get_hdr_no(buf)
== log_block_convert_lsn_to_no(start_lsn));
if (log_sys->is_encrypted()) {
log_crypt(buf, write_len);
}
/* Calculate the checksums for each log block and write them to
the trailer fields of the log blocks */
@ -1264,6 +1260,12 @@ loop:
::memset(write_buf + area_end, 0, pad_size);
}
}
if (log_sys->is_encrypted()) {
log_crypt(write_buf + area_start, log_sys->write_lsn,
area_end - area_start);
}
/* Do the write to the log files */
log_group_write_buf(
&log_sys->log, write_buf + area_start,

View file

@ -715,7 +715,8 @@ loop:
}
if (group->is_encrypted()) {
log_crypt(buf, OS_FILE_LOG_BLOCK_SIZE, true);
log_crypt(buf, start_lsn,
OS_FILE_LOG_BLOCK_SIZE, true);
}
}
}
@ -1061,6 +1062,7 @@ recv_find_max_checkpoint(ulint* max_field)
buf + LOG_CHECKPOINT_LSN);
group->lsn_offset = mach_read_from_8(
buf + LOG_CHECKPOINT_OFFSET);
log_sys->next_checkpoint_no = checkpoint_no;
}
}

View file

@ -2117,7 +2117,7 @@ page_simple_validate_old(
goto func_exit;
}
if (UNIV_UNLIKELY(page_header_get_field(page, PAGE_N_RECS)
if (UNIV_UNLIKELY(ulint(page_header_get_field(page, PAGE_N_RECS))
+ PAGE_HEAP_NO_USER_LOW
!= count + 1)) {
ib::error() << "n recs wrong "
@ -2308,7 +2308,7 @@ page_simple_validate_new(
goto func_exit;
}
if (UNIV_UNLIKELY(page_header_get_field(page, PAGE_N_RECS)
if (UNIV_UNLIKELY(ulint(page_header_get_field(page, PAGE_N_RECS))
+ PAGE_HEAP_NO_USER_LOW
!= count + 1)) {
ib::error() << "n recs wrong "
@ -2615,7 +2615,7 @@ n_owned_zero:
goto func_exit;
}
if (UNIV_UNLIKELY(page_header_get_field(page, PAGE_N_RECS)
if (UNIV_UNLIKELY(ulint(page_header_get_field(page, PAGE_N_RECS))
+ PAGE_HEAP_NO_USER_LOW
!= count + 1)) {
ib::error() << "n recs wrong "

View file

@ -786,8 +786,6 @@ rec_get_converted_size_comp_prefix_low(
it does not */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
const dtuple_t* v_entry,/*!< in: dtuple contains virtual column
data */
ulint* extra, /*!< out: extra size */
bool temp) /*!< in: whether this is a
temporary file record */
@ -795,20 +793,15 @@ rec_get_converted_size_comp_prefix_low(
ulint extra_size;
ulint data_size;
ulint i;
ulint n_null = (n_fields > 0) ? index->n_nullable : 0;
ulint n_v_fields;
ut_ad(n_fields > 0);
ut_ad(n_fields <= dict_index_get_n_fields(index));
ut_ad(!temp || extra);
/* At the time being, only temp file record could possible
store virtual columns */
ut_ad(!v_entry || (dict_index_is_clust(index) && temp));
n_v_fields = v_entry ? dtuple_get_n_v_fields(v_entry) : 0;
ut_d(ulint n_null = index->n_nullable);
extra_size = temp
? UT_BITS_IN_BYTES(n_null)
: REC_N_NEW_EXTRA_BYTES
+ UT_BITS_IN_BYTES(n_null);
? UT_BITS_IN_BYTES(index->n_nullable)
: REC_N_NEW_EXTRA_BYTES + UT_BITS_IN_BYTES(index->n_nullable);
data_size = 0;
if (temp && dict_table_is_comp(index->table)) {
@ -910,42 +903,50 @@ rec_get_converted_size_comp_prefix_low(
*extra = extra_size;
}
/* Log virtual columns */
if (n_v_fields != 0) {
/* length marker */
data_size += 2;
return(extra_size + data_size);
}
for (i = 0; i < n_v_fields; i++) {
dfield_t* vfield;
ulint flen;
/** Determine the converted size of virtual column data in a temporary file.
@see rec_convert_dtuple_to_temp_v()
@param[in] index clustered index
@param[in] v clustered index record augmented with the values
of virtual columns
@return size in bytes */
ulint
rec_get_converted_size_temp_v(const dict_index_t* index, const dtuple_t* v)
{
ut_ad(dict_index_is_clust(index));
const dict_v_col_t* col
= dict_table_get_nth_v_col(index->table, i);
/* length marker */
ulint data_size = 2;
const ulint n_v_fields = dtuple_get_n_v_fields(v);
/* Only those indexed needs to be logged */
if (col->m_col.ord_part) {
data_size += mach_get_compressed_size(
i + REC_MAX_N_FIELDS);
vfield = dtuple_get_nth_v_field(
v_entry, col->v_pos);
for (ulint i = 0; i < n_v_fields; i++) {
const dict_v_col_t* col
= dict_table_get_nth_v_col(index->table, i);
flen = vfield->len;
if (flen != UNIV_SQL_NULL) {
flen = ut_min(
flen,
static_cast<ulint>(
DICT_MAX_FIELD_LEN_BY_FORMAT(
index->table)));
data_size += flen;
}
data_size += mach_get_compressed_size(flen);
}
/* Only those indexed needs to be logged */
if (!col->m_col.ord_part) {
continue;
}
data_size += mach_get_compressed_size(i + REC_MAX_N_FIELDS);
const dfield_t* vfield = dtuple_get_nth_v_field(v, col->v_pos);
ulint flen = vfield->len;
if (flen != UNIV_SQL_NULL) {
flen = ut_min(
flen,
static_cast<ulint>(
DICT_MAX_FIELD_LEN_BY_FORMAT(
index->table)));
data_size += flen;
}
data_size += mach_get_compressed_size(flen);
}
return(extra_size + data_size);
return(data_size);
}
/**********************************************************//**
@ -961,7 +962,7 @@ rec_get_converted_size_comp_prefix(
{
ut_ad(dict_table_is_comp(index->table));
return(rec_get_converted_size_comp_prefix_low(
index, fields, n_fields, NULL, extra, false));
index, fields, n_fields, extra, false));
}
/**********************************************************//**
@ -1007,7 +1008,7 @@ rec_get_converted_size_comp(
}
return(size + rec_get_converted_size_comp_prefix_low(
index, fields, n_fields, NULL, extra, false));
index, fields, n_fields, extra, false));
}
/***********************************************************//**
@ -1190,8 +1191,6 @@ rec_convert_dtuple_to_rec_comp(
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
const dtuple_t* v_entry,/*!< in: dtuple contains
virtual column data */
ulint status, /*!< in: status bits of the record */
bool temp) /*!< in: whether to use the
format for temporary files in
@ -1207,10 +1206,11 @@ rec_convert_dtuple_to_rec_comp(
ulint n_node_ptr_field;
ulint fixed_len;
ulint null_mask = 1;
ulint n_null;
ulint num_v = v_entry ? dtuple_get_n_v_fields(v_entry) : 0;
ut_ad(n_fields > 0);
ut_ad(temp || dict_table_is_comp(index->table));
ulint n_null = index->n_nullable;
const ulint n_null_bytes = UT_BITS_IN_BYTES(n_null);
if (temp) {
ut_ad(status == REC_STATUS_ORDINARY);
@@ -1223,8 +1223,6 @@ rec_convert_dtuple_to_rec_comp(
temp = false;
}
} else {
ut_ad(v_entry == NULL);
ut_ad(num_v == 0);
nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
@@ -1250,13 +1248,9 @@ rec_convert_dtuple_to_rec_comp(
}
end = rec;
if (n_fields != 0) {
n_null = index->n_nullable;
lens = nulls - UT_BITS_IN_BYTES(n_null);
/* clear the SQL-null flags */
memset(lens + 1, 0, nulls - lens);
}
/* clear the SQL-null flags */
lens = nulls - n_null_bytes;
memset(lens + 1, 0, nulls - lens);
/* Store the data and the offsets */
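The hoisted n_null_bytes above is UT_BITS_IN_BYTES(n_null), i.e. the nullable-field bitmap rounded up to whole bytes; a minimal sketch, assuming the usual (n + 7) / 8 definition of that macro:

#include <cstddef>

/* Assumed definition of UT_BITS_IN_BYTES: bits rounded up to bytes. */
static inline size_t toy_bits_in_bytes(size_t n_bits)
{
	return (n_bits + 7) / 8;
}
/* 0 nullable fields -> 0 flag bytes, 1..8 -> 1 byte, 9..16 -> 2, ... */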
@@ -1351,13 +1345,25 @@ rec_convert_dtuple_to_rec_comp(
end += len;
}
}
}
if (!num_v) {
return;
}
/** Write indexed virtual column data into a temporary file.
@see rec_get_converted_size_temp_v()
@param[out] rec serialized record
@param[in] index clustered index
@param[in] v_entry clustered index record augmented with the values
of virtual columns */
void
rec_convert_dtuple_to_temp_v(
byte* rec,
const dict_index_t* index,
const dtuple_t* v_entry)
{
ut_ad(dict_index_is_clust(index));
const ulint num_v = dtuple_get_n_v_fields(v_entry);
/* reserve 2 bytes for writing length */
byte* ptr = end;
byte* ptr = rec;
ptr += 2;
/* Now log information on indexed virtual columns */
@@ -1400,7 +1406,7 @@ rec_convert_dtuple_to_rec_comp(
}
}
mach_write_to_2(end, ptr - end);
mach_write_to_2(rec, ptr - rec);
}
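The 2-byte prefix written by mach_write_to_2() above counts itself plus the payload, which is why the row0log.cc call sites below can skip a whole blob with "b += mach_read_from_2(b)". A minimal sketch of that read side, assuming the usual big-endian 16-bit layout; the toy_ names are illustrative only:

#include <cstdint>

/* Stand-in for mach_read_from_2(): 16-bit big-endian read (assumed). */
static uint16_t toy_read_u16(const unsigned char* b)
{
	return static_cast<uint16_t>((b[0] << 8) | b[1]);
}

/* Advance past one serialized virtual-column blob: the length prefix
includes its own two bytes, so adding it lands on the next record. */
static const unsigned char* toy_skip_virtual_blob(const unsigned char* b)
{
	return b + toy_read_u16(b);
}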
/*********************************************************//**
@@ -1426,8 +1432,7 @@ rec_convert_dtuple_to_rec_new(
rec = buf + extra_size;
rec_convert_dtuple_to_rec_comp(
rec, index, dtuple->fields, dtuple->n_fields, NULL,
status, false);
rec, index, dtuple->fields, dtuple->n_fields, status, false);
/* Set the info bits of the record */
rec_set_info_and_status_bits(rec, dtuple_get_info_bits(dtuple));
@@ -1490,21 +1495,21 @@ rec_convert_dtuple_to_rec(
return(rec);
}
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
/** Determine the size of a data tuple prefix in a temporary file.
@param[in] index clustered or secondary index
@param[in] fields data fields
@param[in] n_fields number of data fields
@param[out] extra record header size
@return total size, in bytes */
ulint
rec_get_converted_size_temp(
/*========================*/
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
const dtuple_t* v_entry,/*!< in: dtuple contains virtual column
data */
ulint* extra) /*!< out: extra size */
const dict_index_t* index,
const dfield_t* fields,
ulint n_fields,
ulint* extra)
{
return(rec_get_converted_size_comp_prefix_low(
index, fields, n_fields, v_entry, extra, true));
index, fields, n_fields, extra, true));
}
/******************************************************//**
@@ -1530,11 +1535,9 @@ rec_convert_dtuple_to_temp(
rec_t* rec, /*!< out: record */
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields, /*!< in: number of fields */
const dtuple_t* v_entry) /*!< in: dtuple contains
virtual column data */
ulint n_fields) /*!< in: number of fields */
{
rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields, v_entry,
rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields,
REC_STATUS_ORDINARY, true);
}

View file

@@ -316,7 +316,7 @@ row_log_online_op(
extra_size+1 (and reserve 0 as the end-of-chunk marker). */
size = rec_get_converted_size_temp(
index, tuple->fields, tuple->n_fields, NULL, &extra_size);
index, tuple->fields, tuple->n_fields, &extra_size);
ut_ad(size >= extra_size);
ut_ad(size <= sizeof log->tail.buf);
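The mrec_size formulas in this file charge "1 + (extra_size >= 0x80)" bytes for the extra_size header mentioned in the comment above. Here is a hedged sketch of a matching one-or-two-byte encoding, assuming the wide form flags the high bit of the first byte; the exact bit layout in row_log_online_op() may differ:

#include <cassert>
#include <cstddef>

/* Write extra_size, offset by 1 so that 0 stays reserved as the
end-of-chunk marker: one byte when small, two when large.
Illustrative layout only; see row_log_online_op() for the real one. */
static unsigned char* toy_write_extra_size(unsigned char* b,
					    size_t extra_size)
{
	size_t v = extra_size + 1;
	assert(v < 0x8000);
	if (extra_size < 0x80) {	/* matches 1 + (extra_size >= 0x80) */
		*b++ = static_cast<unsigned char>(v);
	} else {
		*b++ = static_cast<unsigned char>(0x80 | (v >> 8));
		*b++ = static_cast<unsigned char>(v);
	}
	return b;
}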
@@ -364,7 +364,7 @@ row_log_online_op(
}
rec_convert_dtuple_to_temp(
b + extra_size, index, tuple->fields, tuple->n_fields, NULL);
b + extra_size, index, tuple->fields, tuple->n_fields);
b += size;
if (mrec_size >= avail_size) {
@@ -667,7 +667,7 @@ row_log_table_delete(
ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
old_pk, old_pk->n_fields - 1)->len);
old_pk_size = rec_get_converted_size_temp(
new_index, old_pk->fields, old_pk->n_fields, NULL,
new_index, old_pk->fields, old_pk->n_fields,
&old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100);
@@ -697,9 +697,7 @@ row_log_table_delete(
/* Check if we need to log virtual column data */
if (ventry->n_v_fields > 0) {
ulint v_extra;
mrec_size += rec_get_converted_size_temp(
new_index, NULL, 0, ventry, &v_extra);
mrec_size += rec_get_converted_size_temp_v(new_index, ventry);
}
if (byte* b = row_log_table_open(index->online_log,
@ -713,7 +711,7 @@ row_log_table_delete(
rec_convert_dtuple_to_temp(
b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields, NULL);
old_pk->fields, old_pk->n_fields);
b += old_pk_size;
@@ -746,8 +744,7 @@ row_log_table_delete(
/* log virtual columns */
if (ventry->n_v_fields > 0) {
rec_convert_dtuple_to_temp(
b, new_index, NULL, 0, ventry);
rec_convert_dtuple_to_temp_v(b, new_index, ventry);
b += mach_read_from_2(b);
}
@@ -838,15 +835,16 @@ row_log_table_low_redundant(
}
size = rec_get_converted_size_temp(
index, tuple->fields, tuple->n_fields, ventry, &extra_size);
index, tuple->fields, tuple->n_fields, &extra_size);
ulint v_size = ventry
? rec_get_converted_size_temp_v(index, ventry) : 0;
mrec_size = ROW_LOG_HEADER_SIZE + size + (extra_size >= 0x80);
mrec_size = ROW_LOG_HEADER_SIZE + size + v_size + (extra_size >= 0x80);
if (num_v) {
if (o_ventry) {
ulint v_extra = 0;
mrec_size += rec_get_converted_size_temp(
index, NULL, 0, o_ventry, &v_extra);
mrec_size += rec_get_converted_size_temp_v(
index, o_ventry);
}
} else if (index->table->n_v_cols) {
mrec_size += 2;
@@ -865,7 +863,7 @@ row_log_table_low_redundant(
old_pk_size = rec_get_converted_size_temp(
new_index, old_pk->fields, old_pk->n_fields,
ventry, &old_pk_extra_size);
&old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100);
mrec_size += 1/*old_pk_extra_size*/ + old_pk_size;
}
@@ -879,8 +877,7 @@ row_log_table_low_redundant(
rec_convert_dtuple_to_temp(
b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields,
ventry);
old_pk->fields, old_pk->n_fields);
b += old_pk_size;
}
@@ -893,14 +890,17 @@ row_log_table_low_redundant(
}
rec_convert_dtuple_to_temp(
b + extra_size, index, tuple->fields, tuple->n_fields,
ventry);
b + extra_size, index, tuple->fields, tuple->n_fields);
b += size;
if (ventry) {
rec_convert_dtuple_to_temp_v(b, new_index, ventry);
b += v_size;
}
if (num_v) {
if (o_ventry) {
rec_convert_dtuple_to_temp(
b, new_index, NULL, 0, o_ventry);
rec_convert_dtuple_to_temp_v(
b, new_index, o_ventry);
b += mach_read_from_2(b);
}
} else if (index->table->n_v_cols) {
@@ -990,13 +990,11 @@ row_log_table_low(
+ (extra_size >= 0x80) + rec_offs_size(offsets) - omit_size;
if (ventry && ventry->n_v_fields > 0) {
ulint v_extra = 0;
mrec_size += rec_get_converted_size_temp(
new_index, NULL, 0, ventry, &v_extra);
mrec_size += rec_get_converted_size_temp_v(new_index, ventry);
if (o_ventry) {
mrec_size += rec_get_converted_size_temp(
new_index, NULL, 0, o_ventry, &v_extra);
mrec_size += rec_get_converted_size_temp_v(
new_index, o_ventry);
}
} else if (index->table->n_v_cols) {
/* Always leave 2 bytes length marker for virtual column
@@ -1018,7 +1016,7 @@ row_log_table_low(
old_pk_size = rec_get_converted_size_temp(
new_index, old_pk->fields, old_pk->n_fields,
NULL, &old_pk_extra_size);
&old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100);
mrec_size += 1/*old_pk_extra_size*/ + old_pk_size;
}
@@ -1032,8 +1030,7 @@ row_log_table_low(
rec_convert_dtuple_to_temp(
b + old_pk_extra_size, new_index,
old_pk->fields, old_pk->n_fields,
NULL);
old_pk->fields, old_pk->n_fields);
b += old_pk_size;
}
@@ -1051,13 +1048,12 @@ row_log_table_low(
b += rec_offs_data_size(offsets);
if (ventry && ventry->n_v_fields > 0) {
rec_convert_dtuple_to_temp(
b, new_index, NULL, 0, ventry);
rec_convert_dtuple_to_temp_v(b, new_index, ventry);
b += mach_read_from_2(b);
if (o_ventry) {
rec_convert_dtuple_to_temp(
b, new_index, NULL, 0, o_ventry);
rec_convert_dtuple_to_temp_v(
b, new_index, o_ventry);
b += mach_read_from_2(b);
}
} else if (index->table->n_v_cols) {

View file

@@ -411,7 +411,7 @@ row_merge_buf_encode(
ulint extra_size;
size = rec_get_converted_size_temp(
index, entry->fields, n_fields, NULL, &extra_size);
index, entry->fields, n_fields, &extra_size);
ut_ad(size >= extra_size);
/* Encode extra_size + 1 */
@@ -424,7 +424,7 @@ row_merge_buf_encode(
}
rec_convert_dtuple_to_temp(*b + extra_size, index,
entry->fields, n_fields, NULL);
entry->fields, n_fields);
*b += size;
}
@@ -897,7 +897,7 @@ row_merge_buf_add(
ulint extra;
size = rec_get_converted_size_temp(
index, entry->fields, n_fields, NULL, &extra);
index, entry->fields, n_fields, &extra);
ut_ad(data_size + extra_size == size);
ut_ad(extra_size == extra);

View file

@@ -2194,14 +2194,6 @@ files_checked:
recv_sys->dblwr.pages.clear();
if (err == DB_SUCCESS && !srv_read_only_mode) {
log_mutex_enter();
if (log_sys->is_encrypted() && !log_crypt_init()) {
err = DB_ERROR;
}
log_mutex_exit();
}
if (err == DB_SUCCESS) {
/* Initialize the change buffer. */
err = dict_boot();
@@ -2683,13 +2675,6 @@ files_checked:
fil_crypt_threads_init();
fil_system_exit();
/*
Create a checkpoint before logging anything new, so that
the current encryption key in use is definitely logged
before any log blocks encrypted with that key.
*/
log_make_checkpoint_at(LSN_MAX, TRUE);
/* Initialize online defragmentation. */
btr_defragment_init();
btr_defragment_thread_active = true;

View file

@@ -619,6 +619,7 @@ trx_free_prepared(
|| (trx_state_eq(trx, TRX_STATE_ACTIVE)
&& trx->is_recovered
&& (!srv_was_started
|| srv_operation == SRV_OPERATION_RESTORE
|| srv_read_only_mode
|| srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO)));
ut_a(trx->magic_n == TRX_MAGIC_N);

View file

@@ -402,12 +402,20 @@ analyze table t1;
-- enable_query_log
# index_merge on first table in join
if ($index_merge_random_rows_in_EXPLAIN)
{
--replace_column 9 #
}
explain select * from t0 left join t1 on (t0.key1=t1.key1)
where t0.key1=3 or t0.key2=4;
select * from t0 left join t1 on (t0.key1=t1.key1)
where t0.key1=3 or t0.key2=4;
if ($index_merge_random_rows_in_EXPLAIN)
{
--replace_column 9 #
}
explain
select * from t0,t1 where (t0.key1=t1.key1) and ( t0.key1=3 or t0.key2=4);

View file

@@ -250,8 +250,8 @@ insert into t1 select * from t0;
explain select * from t0 left join t1 on (t0.key1=t1.key1)
where t0.key1=3 or t0.key2=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where
1 SIMPLE t1 ref i1 i1 4 test.t0.key1 2
1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL # Using union(i1,i2); Using where
1 SIMPLE t1 ref i1 i1 4 test.t0.key1 #
select * from t0 left join t1 on (t0.key1=t1.key1)
where t0.key1=3 or t0.key2=4;
key1 key2 key3 key4 key5 key6 key7 key8 key1 key2 key3 key4 key5 key6 key7 key8
@@ -260,8 +260,8 @@ key1 key2 key3 key4 key5 key6 key7 key8 key1 key2 key3 key4 key5 key6 key7 key8
explain
select * from t0,t1 where (t0.key1=t1.key1) and ( t0.key1=3 or t0.key2=4);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where
1 SIMPLE t1 ref i1 i1 4 test.t0.key1 2
1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL # Using union(i1,i2); Using where
1 SIMPLE t1 ref i1 i1 4 test.t0.key1 #
explain
select * from t0,t1 where (t0.key1=t1.key1) and
(t0.key1=3 or t0.key2<4) and t1.key1=2;

View file

@@ -1584,7 +1584,7 @@ int json_escape(CHARSET_INFO *str_cs,
enum json_esc_char_classes c_class;
str+= c_len;
if (c_chr > 0x60 || (c_class= json_escape_chr_map[c_chr]) == ESC_)
if (c_chr >= 0x60 || (c_class= json_escape_chr_map[c_chr]) == ESC_)
{
if ((c_len= json_cs->cset->wc_mb(json_cs, c_chr, json, json_end)) > 0)
{
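The one-character change above looks like an off-by-one fix: assuming json_escape_chr_map has exactly 0x60 entries (escape classes for code points 0x00 through 0x5F), the old "c_chr > 0x60" guard let c_chr == 0x60 fall through to the map lookup and read one slot past the end of the array. A toy reproduction of the corrected bound, with hypothetical names standing in for the real map and ESC_ class:

#include <cstdint>

static const char toy_escape_map[0x60] = {}; /* classes for 0x00..0x5F */

/* Old guard "c > 0x60": c == 0x60 fails it and indexes
toy_escape_map[0x60], one past the end. New guard "c >= 0x60"
short-circuits before the out-of-bounds read. */
static bool toy_copy_verbatim(uint32_t c)
{
	return c >= 0x60 || toy_escape_map[c] == 0; /* 0 plays ESC_ here */
}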