Merge 10.7 into 10.8
commit c75e3770dc
22 changed files with 256 additions and 103 deletions
@@ -31,7 +31,7 @@ extern ulong my_time_to_wait_for_lock;
#include <signal.h>
#ifdef HAVE_SIGHANDLER_T
#define sig_return sighandler_t
#elif defined(SOLARIS) || defined(__sun) || defined(__APPLE__) || defined(__FreeBSD__)
#elif defined(SOLARIS) || defined(__sun) || defined(__APPLE__) || defined(__FreeBSD__) || defined(_AIX)
typedef void (*sig_return)(int); /* Returns type from signal */
#else
typedef void (*sig_return)(void); /* Returns type from signal */
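The hunk above adjusts which platforms get the typedef void (*sig_return)(int) form; sig_return names the handler type that signal() returns. A minimal, self-contained sketch of how such a typedef is typically used, assuming a plain POSIX-style environment (the names below are illustrative, not MariaDB's own code):

#include <csignal>
#include <cstdio>

/* Stand-in for the platform-dependent typedef in the hunk above:
   on most current systems signal() returns a void (*)(int). */
typedef void (*sig_return)(int);

static void on_interrupt(int signo)
{
  /* Only async-signal-safe work belongs here; printf is for illustration. */
  std::printf("caught signal %d\n", signo);
}

int main()
{
  /* signal() hands back the previous handler as a sig_return value. */
  sig_return previous= std::signal(SIGINT, on_interrupt);
  if (previous == SIG_ERR)
    return 1;
  std::raise(SIGINT);            /* trigger the handler once */
  std::signal(SIGINT, previous); /* restore the original handler */
  return 0;
}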
@@ -39,6 +39,28 @@ MDL_INTENTION_EXCLUSIVE Schema metadata lock test
select * from t1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
backup unlock;
connection con1;
connection default;
#
# Check that BACKUP LOCK blocks some operations
#
create sequence seq1;
create sequence seq2;
backup lock seq1;
connection con1;
CREATE OR REPLACE SEQUENCE seq1 START -28;
ERROR HY000: Sequence 'test.seq1' values are conflicting
SET STATEMENT max_statement_time=10 FOR CREATE OR REPLACE SEQUENCE seq1 START 50;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
SET STATEMENT max_statement_time=10 FOR ALTER SEQUENCE IF EXISTS seq1 NOMAXVALUE;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
SET STATEMENT max_statement_time=10 FOR ALTER SEQUENCE IF EXISTS seq1 MAXVALUE 1000;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
SET STATEMENT max_statement_time=10 for rename table seq2 to seq3, seq3 to seq1;
ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
connection default;
backup unlock;
drop table seq1,seq2;
#
# BACKUP LOCK and BACKUP UNLOCK are not allowed in procedures.
#
@@ -141,7 +163,6 @@ ERROR HY000: Can't execute the given command because you have active locked tabl
SET STATEMENT max_statement_time=180 FOR BACKUP LOCK test.u;
# restart
#
connection con1;
connection default;
disconnect con1;
show tables;
@@ -43,10 +43,39 @@ SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.me
--error ER_LOCK_DEADLOCK
select * from t1;
backup unlock;
connection con1;
--reap
connection default;

--echo #
--echo # Check that BACKUP LOCK blocks some operations
--echo #

# These test has to be done with timeouts as we want to ensure that the tables
# doesn't change

create sequence seq1;
create sequence seq2;
backup lock seq1;
connection con1;
--error ER_SEQUENCE_INVALID_DATA
CREATE OR REPLACE SEQUENCE seq1 START -28;
--error ER_STATEMENT_TIMEOUT
SET STATEMENT max_statement_time=10 FOR CREATE OR REPLACE SEQUENCE seq1 START 50;
--error ER_STATEMENT_TIMEOUT
SET STATEMENT max_statement_time=10 FOR ALTER SEQUENCE IF EXISTS seq1 NOMAXVALUE;
--error ER_STATEMENT_TIMEOUT
SET STATEMENT max_statement_time=10 FOR ALTER SEQUENCE IF EXISTS seq1 MAXVALUE 1000;
--error ER_STATEMENT_TIMEOUT
SET STATEMENT max_statement_time=10 for rename table seq2 to seq3, seq3 to seq1;
connection default;
backup unlock;
drop table seq1,seq2;

--echo #
--echo # BACKUP LOCK and BACKUP UNLOCK are not allowed in procedures.
--echo #

delimiter |;
--error ER_SP_BADSTATEMENT
CREATE PROCEDURE p_BACKUP_LOCK()
@@ -162,8 +191,6 @@ SET STATEMENT max_statement_time=180 FOR BACKUP LOCK test.u;

--echo #

connection con1;
--reap
connection default;
disconnect con1;
show tables;
@@ -4028,6 +4028,43 @@ drop table t1;
# End of 10.1 tests
#
#
# MDEV-27442 Wrong result upon query with DISTINCT and EXISTS subquery
#
CREATE TABLE t1 (a int, b int, KEY b (b,a)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (0,100),(2,100),(2,101),(3,102);
# Must not use Using index for group-by
explain SELECT DISTINCT b FROM t1 WHERE EXISTS ( SELECT 1 FROM DUAL WHERE a > 1 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL b 10 NULL 4 Using where; Using index
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
SELECT DISTINCT b FROM t1 WHERE EXISTS ( SELECT 1 FROM DUAL WHERE a > 1 );
b
100
101
102
DROP TABLE t1;
#
# MDEV-26585 Wrong query results when `using index for group-by`
#
CREATE TABLE `t1` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`owner_id` int(11) DEFAULT NULL,
`foo` tinyint(1) DEFAULT 0,
`whatever` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `index_t1_on_owner_id_and_foo` (`owner_id`,`foo`)
) engine=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO t1 (owner_id, foo, whatever)
VALUES (1, TRUE, "yello"), (1, FALSE, "yello"), (2, TRUE, "yello"),
(2, TRUE, "yello"), (2, FALSE, "yello");
EXPLAIN SELECT DISTINCT owner_id FROM t1 WHERE foo = true GROUP BY owner_id HAVING (COUNT(*) = 1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL index_t1_on_owner_id_and_foo 7 NULL 5 Using where; Using index
SELECT DISTINCT owner_id FROM t1 WHERE foo = true GROUP BY owner_id HAVING (COUNT(*) = 1);
owner_id
1
DROP TABLE t1;
#
# MDEV-24353: Adding GROUP BY slows down a query
#
CREATE TABLE t1 (p int NOT NULL, a int NOT NULL, PRIMARY KEY (p,a));
@@ -5,7 +5,7 @@

--source include/default_optimizer_switch.inc
--source include/have_sequence.inc

--source include/have_innodb.inc
#
# TODO:
# Add queries with:
@@ -1691,6 +1691,37 @@ drop table t1;
--echo # End of 10.1 tests
--echo #

--echo #
--echo # MDEV-27442 Wrong result upon query with DISTINCT and EXISTS subquery
--echo #

CREATE TABLE t1 (a int, b int, KEY b (b,a)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (0,100),(2,100),(2,101),(3,102);
--echo # Must not use Using index for group-by
explain SELECT DISTINCT b FROM t1 WHERE EXISTS ( SELECT 1 FROM DUAL WHERE a > 1 );
SELECT DISTINCT b FROM t1 WHERE EXISTS ( SELECT 1 FROM DUAL WHERE a > 1 );
DROP TABLE t1;

--echo #
--echo # MDEV-26585 Wrong query results when `using index for group-by`
--echo #

CREATE TABLE `t1` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`owner_id` int(11) DEFAULT NULL,
`foo` tinyint(1) DEFAULT 0,
`whatever` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `index_t1_on_owner_id_and_foo` (`owner_id`,`foo`)
) engine=InnoDB DEFAULT CHARSET=utf8;

INSERT INTO t1 (owner_id, foo, whatever)
VALUES (1, TRUE, "yello"), (1, FALSE, "yello"), (2, TRUE, "yello"),
(2, TRUE, "yello"), (2, FALSE, "yello");
EXPLAIN SELECT DISTINCT owner_id FROM t1 WHERE foo = true GROUP BY owner_id HAVING (COUNT(*) = 1);
SELECT DISTINCT owner_id FROM t1 WHERE foo = true GROUP BY owner_id HAVING (COUNT(*) = 1);
DROP TABLE t1;

--echo #
--echo # MDEV-24353: Adding GROUP BY slows down a query
--echo #
@@ -15,7 +15,7 @@ SET DEBUG_SYNC='now WAIT_FOR before_fragment';
SET GLOBAL wsrep_cluster_address = '';
SET DEBUG_SYNC = 'now SIGNAL continue';
connection node_2;
ERROR HY000: Lost connection to MySQL server during query
ERROR HY000: Lost connection to server during query
connection node_2a;
SELECT * FROM mysql.wsrep_streaming_log;
node_uuid trx_id seqno flags frag
@@ -5,6 +5,7 @@ c INT,
INDEX(b))
ENGINE=InnoDB STATS_PERSISTENT=0;
SET GLOBAL innodb_change_buffering_debug = 1;
SET GLOBAL innodb_change_buffering=all;
INSERT INTO t1 SELECT 0,'x',1 FROM seq_1_to_1024;
# restart: --innodb-force-recovery=6 --innodb-change-buffer-dump
check table t1;
@@ -13,6 +13,7 @@ c INT,
INDEX(b))
ENGINE=InnoDB STATS_PERSISTENT=0;
SET GLOBAL innodb_change_buffering_debug = 1;
SET GLOBAL innodb_change_buffering = all;
INSERT INTO t1 SELECT 0,'x',1 FROM seq_1_to_8192;
BEGIN;
SELECT b FROM t1 LIMIT 3;
@@ -24,6 +24,7 @@ ENGINE=InnoDB STATS_PERSISTENT=0;
# change buffering is possible, so that the change buffer will be used
# whenever possible.
SET GLOBAL innodb_change_buffering_debug = 1;
SET GLOBAL innodb_change_buffering=all;

# Create enough rows for the table, so that the change buffer will be
# used for modifying the secondary index page. There must be multiple
@@ -33,6 +33,7 @@ ENGINE=InnoDB STATS_PERSISTENT=0;
# change buffering is possible, so that the change buffer will be used
# whenever possible.
SET GLOBAL innodb_change_buffering_debug = 1;
SET GLOBAL innodb_change_buffering = all;
let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err;

# Create enough rows for the table, so that the change buffer will be
@@ -1,28 +1,28 @@
SET @start_global_value = @@global.innodb_change_buffering;
SELECT @start_global_value;
@start_global_value
all
none
Valid values are 'all', 'deletes', 'changes', 'inserts', 'none', 'purges'
select @@global.innodb_change_buffering in ('all', 'deletes', 'changes', 'inserts', 'none', 'purges');
@@global.innodb_change_buffering in ('all', 'deletes', 'changes', 'inserts', 'none', 'purges')
1
select @@global.innodb_change_buffering;
@@global.innodb_change_buffering
all
none
select @@session.innodb_change_buffering;
ERROR HY000: Variable 'innodb_change_buffering' is a GLOBAL variable
show global variables like 'innodb_change_buffering';
Variable_name Value
innodb_change_buffering all
innodb_change_buffering none
show session variables like 'innodb_change_buffering';
Variable_name Value
innodb_change_buffering all
innodb_change_buffering none
select * from information_schema.global_variables where variable_name='innodb_change_buffering';
VARIABLE_NAME VARIABLE_VALUE
INNODB_CHANGE_BUFFERING all
INNODB_CHANGE_BUFFERING none
select * from information_schema.session_variables where variable_name='innodb_change_buffering';
VARIABLE_NAME VARIABLE_VALUE
INNODB_CHANGE_BUFFERING all
INNODB_CHANGE_BUFFERING none
set global innodb_change_buffering='none';
select @@global.innodb_change_buffering;
@@global.innodb_change_buffering
@@ -62,4 +62,4 @@ ERROR 42000: Variable 'innodb_change_buffering' can't be set to the value of 'so
SET @@global.innodb_change_buffering = @start_global_value;
SELECT @@global.innodb_change_buffering;
@@global.innodb_change_buffering
all
none
@@ -227,7 +227,7 @@ READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME INNODB_CHANGE_BUFFERING
SESSION_VALUE NULL
DEFAULT_VALUE all
DEFAULT_VALUE none
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE ENUM
VARIABLE_COMMENT Buffer changes to secondary indexes.
@@ -23,6 +23,7 @@

#ifndef MAIN

#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__linux__) || defined(__sun) || defined(_WIN32)
static my_bool memcpy_and_test(uchar *to, uchar *from, uint len)
{
uint i, res= 1;
@@ -32,6 +33,7 @@ static my_bool memcpy_and_test(uchar *to, uchar *from, uint len)
res= 0;
return res;
}
#endif

#if defined(__APPLE__) || defined(__FreeBSD__)
#include <net/ethernet.h>
@@ -195,4 +197,3 @@ int main(int argc __attribute__((unused)),char **argv)
return 0;
}
#endif

@@ -1,4 +1,4 @@
/* Copyright (c) 2018, 2021, MariaDB Corporation.
/* Copyright (c) 2018, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
@@ -257,9 +257,13 @@ static bool backup_flush(THD *thd)
This will probably require a callback from the InnoDB code.
*/

/* Retry to get inital lock for 0.1 + 0.5 + 2.25 + 11.25 + 56.25 = 70.35 sec */
#define MAX_RETRY_COUNT 5

static bool backup_block_ddl(THD *thd)
{
PSI_stage_info org_stage;
uint sleep_time;
DBUG_ENTER("backup_block_ddl");

kill_delayed_threads();
@@ -302,18 +306,33 @@ static bool backup_block_ddl(THD *thd)
block new DDL's, in addition to all previous blocks
We didn't do this lock above, as we wanted DDL's to be executed while
we wait for non transactional tables (which may take a while).

We do this lock in a loop as we can get a deadlock if there are multi-object
ddl statements like
RENAME TABLE t1 TO t2, t3 TO t3
and the MDL happens in the middle of it.
*/
THD_STAGE_INFO(thd, stage_waiting_for_ddl);
if (thd->mdl_context.upgrade_shared_lock(backup_flush_ticket,
MDL_BACKUP_WAIT_DDL,
thd->variables.lock_wait_timeout))
sleep_time= 100; // Start with 0.1 seconds
for (uint i= 0 ; i <= MAX_RETRY_COUNT ; i++)
{
/*
Could be a timeout. Downgrade lock to what is was before this function
was called so that this function can be called again
*/
backup_flush_ticket->downgrade_lock(MDL_BACKUP_FLUSH);
goto err;
if (!thd->mdl_context.upgrade_shared_lock(backup_flush_ticket,
MDL_BACKUP_WAIT_DDL,
thd->variables.lock_wait_timeout))
break;
if (thd->get_stmt_da()->sql_errno() != ER_LOCK_DEADLOCK || thd->killed ||
i == MAX_RETRY_COUNT)
{
/*
Could be a timeout. Downgrade lock to what is was before this function
was called so that this function can be called again
*/
backup_flush_ticket->downgrade_lock(MDL_BACKUP_FLUSH);
goto err;
}
thd->clear_error(); // Forget the DEADLOCK error
my_sleep(sleep_time);
sleep_time*= 5; // Wait a bit longer next time
}

/* There can't be anything more that needs to be logged to ddl log */
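The rewritten block above retries the MDL upgrade up to MAX_RETRY_COUNT times, clearing the deadlock error and sleeping with a delay that grows by a factor of five between attempts. A self-contained sketch of the same retry-with-backoff pattern, with the MDL upgrade reduced to a caller-supplied callable and the server-specific error handling reduced to a status code (all names below are illustrative, not MariaDB APIs):

#include <chrono>
#include <functional>
#include <thread>

enum class Status { OK, DEADLOCK, TIMEOUT };

/* Retry a lock-upgrade attempt on deadlock, sleeping 5x longer each round. */
bool upgrade_with_backoff(const std::function<Status()> &try_upgrade,
                          unsigned max_retries= 5)
{
  auto sleep_time= std::chrono::microseconds(100);  // start small
  for (unsigned i= 0; i <= max_retries; i++)
  {
    Status s= try_upgrade();
    if (s == Status::OK)
      return true;                  // lock upgraded
    if (s != Status::DEADLOCK || i == max_retries)
      return false;                 // timeout or retries exhausted: give up
    std::this_thread::sleep_for(sleep_time);
    sleep_time*= 5;                 // back off before the next attempt
  }
  return false;
}

int main()
{
  int calls= 0;
  /* Simulated upgrade that deadlocks twice and then succeeds. */
  bool ok= upgrade_with_backoff([&] {
    return ++calls < 3 ? Status::DEADLOCK : Status::OK;
  });
  return ok ? 0 : 1;
}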
@@ -14112,7 +14112,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
key_part_range[1]= last_part;

/* Check if cur_part is referenced in the WHERE clause. */
if (join->conds->walk(&Item::find_item_in_field_list_processor, 0,
if (join->conds->walk(&Item::find_item_in_field_list_processor, true,
key_part_range))
{
cause= "keypart reference from where clause";
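The one-word change above flips the second argument of Item::walk() (the walk-into-subqueries flag) from 0 to true, so the check for WHERE-clause references to the key parts also looks inside subqueries; that is what disqualifies loose index scan for the MDEV-27442 query. A simplified sketch of a tree walk governed by such a flag, using a made-up node type rather than the real Item hierarchy:

#include <functional>
#include <vector>

/* Toy expression node: a column reference that may own a nested subquery. */
struct Node
{
  int field_id;                 // column this node refers to (0 = none)
  std::vector<Node> children;   // ordinary sub-expressions
  std::vector<Node> subquery;   // expressions belonging to a subquery
};

/* Visit every node; descend into subqueries only when walk_subquery is set.
   Stops and returns true as soon as the processor matches, like Item::walk. */
bool walk(const Node &n, bool walk_subquery,
          const std::function<bool(const Node &)> &processor)
{
  if (processor(n))
    return true;
  for (const Node &c : n.children)
    if (walk(c, walk_subquery, processor))
      return true;
  if (walk_subquery)
    for (const Node &s : n.subquery)
      if (walk(s, walk_subquery, processor))
        return true;
  return false;
}

int main()
{
  // WHERE ... EXISTS (subquery referencing column 1)
  Node where{0, {}, {Node{1, {}, {}}}};
  auto refers_to_col_1= [](const Node &n) { return n.field_id == 1; };
  bool shallow= walk(where, false, refers_to_col_1); // misses the reference
  bool deep=    walk(where, true,  refers_to_col_1); // finds it
  return (!shallow && deep) ? 0 : 1;
}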
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
Copyright (c) 2009, 2021, MariaDB Corporation.
Copyright (c) 2009, 2022, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1728,7 +1728,8 @@ bool JOIN::prepare_stage2()
#endif
if (select_lex->olap == ROLLUP_TYPE && rollup_init())
goto err;
if (alloc_func_list())
if (alloc_func_list() ||
make_sum_func_list(all_fields, fields_list, false))
goto err;

res= FALSE;
@@ -2359,7 +2360,21 @@ JOIN::optimize_inner()
If all items were resolved by opt_sum_query, there is no need to
open any tables.
*/
if ((res=opt_sum_query(thd, select_lex->leaf_tables, all_fields, conds)))

/*
The following resetting and restoring of sum_funcs is needed to
go around a bug in spider where it assumes that
make_sum_func_list() has not been called yet and do logical
choices based on this if special handling of min/max functions should
be done. We disable this special handling while we are trying to find
out if we can replace MIN/MAX values with constants.
*/
Item_sum **save_func_sums= sum_funcs, *tmp_sum_funcs= 0;
sum_funcs= &tmp_sum_funcs;
res= opt_sum_query(thd, select_lex->leaf_tables, all_fields, conds);
sum_funcs= save_func_sums;

if (res)
{
DBUG_ASSERT(res >= 0);
if (res == HA_ERR_KEY_NOT_FOUND)
@@ -2957,18 +2972,14 @@ int JOIN::optimize_stage2()
}

/*
Remove ORDER BY in the following cases:
- GROUP BY is more specific. Example GROUP BY a, b ORDER BY a
- If there are aggregate functions and no GROUP BY, this always leads
to one row result, no point in sorting.
We can ignore ORDER BY if it's a prefix of the GROUP BY list
(as MariaDB is by default sorting on GROUP BY) or
if there is no GROUP BY and aggregate functions are used
(as the result will only contain one row).
*/
if (test_if_subpart(group_list, order) ||
(!group_list && tmp_table_param.sum_func_count))
{
order= 0;
if (is_indexed_agg_distinct(this, NULL))
sort_and_group= 0;
}
if (order && (test_if_subpart(group_list, order) ||
(!group_list && tmp_table_param.sum_func_count)))
order=0;

// Can't use sort on head table if using join buffering
if (full_join || hash_join)
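The rewritten comment and condition above drop the ORDER BY only when it is a prefix of the GROUP BY list (MariaDB already sorts on GROUP BY) or when there is no GROUP BY but aggregate functions guarantee a single-row result. A rough sketch of the prefix test that test_if_subpart() performs, on plain column-name lists instead of the server's ORDER structures (illustrative only):

#include <string>
#include <vector>

/* True when `order` is a leading prefix of `group`,
   e.g. GROUP BY a, b with ORDER BY a. */
bool order_is_prefix_of_group(const std::vector<std::string> &group,
                              const std::vector<std::string> &order)
{
  if (order.size() > group.size())
    return false;
  for (size_t i= 0; i < order.size(); i++)
    if (order[i] != group[i])
      return false;
  return true;
}

int main()
{
  std::vector<std::string> group{"a", "b"};
  // ORDER BY a is implied by sorting on GROUP BY a, b, so it can be dropped.
  bool drop1= order_is_prefix_of_group(group, {"a"});  // true
  // ORDER BY b is not a prefix, so the sort must be kept.
  bool drop2= order_is_prefix_of_group(group, {"b"});  // false
  return (drop1 && !drop2) ? 0 : 1;
}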
@@ -3000,7 +3011,6 @@ int JOIN::optimize_stage2()
if (select_lex->have_window_funcs())
simple_order= FALSE;


/*
If the hint FORCE INDEX FOR ORDER BY/GROUP BY is used for the table
whose columns are required to be returned in a sorted order, then
@@ -3733,7 +3743,7 @@ bool JOIN::make_aggr_tables_info()
// for the first table
if (group_list || tmp_table_param.sum_func_count)
{
if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true, true))
if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true))
DBUG_RETURN(true);
if (prepare_sum_aggregators(thd, sum_funcs,
!join_tab->is_using_agg_loose_index_scan()))
@@ -3843,7 +3853,7 @@ bool JOIN::make_aggr_tables_info()
last_tab->all_fields= &tmp_all_fields3;
last_tab->fields= &tmp_fields_list3;
}
if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true, true))
if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true))
DBUG_RETURN(true);
if (prepare_sum_aggregators(thd, sum_funcs,
!join_tab ||
@@ -4060,8 +4070,6 @@ JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List<Item> *table_fields,
}
else
{
if (make_sum_func_list(all_fields, fields_list, false))
goto err;
if (prepare_sum_aggregators(thd, sum_funcs,
!join_tab->is_using_agg_loose_index_scan()))
goto err;
@@ -7314,8 +7322,7 @@ void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
Check for the presence of AGGFN(DISTINCT a) queries that may be subject
to loose index scan.


Check if the query is a subject to AGGFN(DISTINCT) using loose index scan
Check if the query is a subject to AGGFN(DISTINCT) using loose index scan
(QUICK_GROUP_MIN_MAX_SELECT).
Optionally (if out_args is supplied) will push the arguments of
AGGFN(DISTINCT) to the list
@@ -7348,14 +7355,11 @@ is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args)
Item_sum **sum_item_ptr;
bool result= false;

if (join->table_count != 1 || /* reference more than 1 table */
if (join->table_count != 1 || /* reference more than 1 table */
join->select_distinct || /* or a DISTINCT */
join->select_lex->olap == ROLLUP_TYPE) /* Check (B3) for ROLLUP */
return false;

if (join->make_sum_func_list(join->all_fields, join->fields_list, true))
return false;

Bitmap<MAX_FIELDS> first_aggdistinct_fields;
bool first_aggdistinct_fields_initialized= false;
for (sum_item_ptr= join->sum_funcs; *sum_item_ptr; sum_item_ptr++)
@@ -7457,16 +7461,23 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
while ((item= select_items_it++))
item->walk(&Item::collect_item_field_processor, 0, &indexed_fields);
}
else if (join->tmp_table_param.sum_func_count &&
is_indexed_agg_distinct(join, &indexed_fields))
else if (!join->tmp_table_param.sum_func_count ||
!is_indexed_agg_distinct(join, &indexed_fields))
{
join->sort_and_group= 1;
}
else
/*
There where no GROUP BY fields and also either no aggregate
functions or not all aggregate functions where used with the
same DISTINCT (or MIN() / MAX() that works similarly).
Nothing to do there.
*/
return;
}

if (indexed_fields.elements == 0)
{
/* There where no index we could use to satisfy the GROUP BY */
return;
}

/* Intersect the keys of all group fields. */
cur_item= indexed_fields_it++;
@@ -25951,16 +25962,13 @@ bool JOIN::alloc_func_list()

bool JOIN::make_sum_func_list(List<Item> &field_list,
List<Item> &send_result_set_metadata,
bool before_group_by, bool recompute)
bool before_group_by)
{
List_iterator_fast<Item> it(field_list);
Item_sum **func;
Item *item;
DBUG_ENTER("make_sum_func_list");

if (*sum_funcs && !recompute)
DBUG_RETURN(FALSE); /* We have already initialized sum_funcs. */

func= sum_funcs;
while ((item=it++))
{
@@ -26107,7 +26115,7 @@ change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
Change all funcs to be fields in tmp table.

@param thd THD pointer
@param ref_pointer_array array of pointers to top elements of filed list
@param ref_pointer_array array of pointers to top elements of field list
@param res_selected_fields new list of items of select item list
@param res_all_fields new list of all items
@param elements number of elements in select item list
@@ -1206,7 +1206,17 @@ public:
Indicates that grouping will be performed on the result set during
query execution. This field belongs to query execution.

@see make_group_fields, alloc_group_fields, JOIN::exec
If 'sort_and_group' is set, then the optimizer is going to use on of
the following algorithms to resolve GROUP BY.

- If one table, sort the table and then calculate groups on the fly.
- If more than one table, create a temporary table to hold the join,
sort it and then resolve group by on the fly.

The 'on the fly' calculation is done in end_send_group()

@see make_group_fields, alloc_group_fields, JOIN::exec,
setup_end_select_func
*/
bool sort_and_group;
bool first_record,full_join, no_field_update;
@@ -1585,7 +1595,7 @@ public:
bool make_range_rowid_filters();
bool init_range_rowid_filters();
bool make_sum_func_list(List<Item> &all_fields, List<Item> &send_fields,
bool before_group_by, bool recompute= FALSE);
bool before_group_by);

/// Initialzes a slice, see comments for ref_ptrs above.
Ref_ptr_array ref_ptr_array_slice(size_t slice_num)
@@ -19369,7 +19369,7 @@ static MYSQL_SYSVAR_BOOL(numa_interleave, srv_numa_interleave,
static MYSQL_SYSVAR_ENUM(change_buffering, innodb_change_buffering,
PLUGIN_VAR_RQCMDARG,
"Buffer changes to secondary indexes.",
NULL, NULL, IBUF_USE_ALL, &innodb_change_buffering_typelib);
NULL, NULL, IBUF_USE_NONE, &innodb_change_buffering_typelib);

static MYSQL_SYSVAR_UINT(change_buffer_max_size,
srv_change_buffer_max_size,
@@ -624,10 +624,6 @@ private:
@param type extended record subtype; @see mrec_ext_t */
inline void log_write_extended(const buf_block_t &block, byte type);

/** Prepare to write the mini-transaction log to the redo log buffer.
@return number of bytes to write in finish_write() */
inline size_t prepare_write();

/** Write a FILE_MODIFY record when a non-predefined persistent
tablespace was modified for the first time since fil_names_clear(). */
ATTRIBUTE_NOINLINE ATTRIBUTE_COLD void name_write();
@@ -635,6 +631,10 @@ private:
/** Encrypt the log */
ATTRIBUTE_NOINLINE void encrypt();

/** Append the redo log records to the redo log buffer.
@return {start_lsn,flush_ahead} */
std::pair<lsn_t,page_flush_ahead> do_write();

/** Append the redo log records to the redo log buffer.
@param len number of bytes to write
@return {start_lsn,flush_ahead} */
@@ -1,7 +1,7 @@
/*****************************************************************************

Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2021, MariaDB Corporation.
Copyright (c) 2017, 2022, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,7 +33,8 @@ inline bool mtr_t::is_block_dirtied(const buf_block_t *block)
ut_ad(block->page.in_file());
ut_ad(block->page.frame);
ut_ad(block->page.buf_fix_count());
return block->page.oldest_modification() <= 1;
return block->page.oldest_modification() <= 1 &&
block->page.id().space() < SRV_TMP_SPACE_ID;
}

/**
@@ -52,8 +53,8 @@ mtr_t::memo_push(void* object, mtr_memo_type_t type)
grab log_sys.flush_order_mutex at mtr_t::commit() so that we
can insert the dirtied page into the flush list. */

if ((type == MTR_MEMO_PAGE_X_FIX || type == MTR_MEMO_PAGE_SX_FIX)
&& !m_made_dirty) {
if (!m_made_dirty
&& (type == MTR_MEMO_PAGE_X_FIX || type == MTR_MEMO_PAGE_SX_FIX)) {

m_made_dirty = is_block_dirtied(
reinterpret_cast<const buf_block_t*>(object));
@@ -616,11 +616,6 @@ inline lsn_t log_t::write_buf() noexcept
/* Do the write to the log file */
log_write_buf(write_buf, length, offset);
write_lsn= lsn;
if (srv_file_flush_method == SRV_O_DSYNC)
{
flushed_to_disk_lsn.store(lsn, std::memory_order_release);
log_flush_notify(lsn);
}
}

return write_lock.release(lsn);
@@ -630,7 +625,7 @@ inline bool log_t::flush(lsn_t lsn) noexcept
{
ut_ad(lsn >= get_flushed_lsn());
flush_lock.set_pending(lsn);
const bool success{log.flush()};
const bool success{srv_file_flush_method == SRV_O_DSYNC || log.flush()};
if (UNIV_LIKELY(success))
{
flushed_to_disk_lsn.store(lsn, std::memory_order_release);
@@ -646,10 +641,7 @@ inline bool log_t::flush(lsn_t lsn) noexcept
static lsn_t log_flush(lsn_t lsn)
{
ut_ad(!log_sys.is_pmem());

if (srv_file_flush_method != SRV_O_DSYNC)
ut_a(log_sys.flush(lsn));

ut_a(log_sys.flush(lsn));
DBUG_EXECUTE_IF("crash_after_log_write_upto", DBUG_SUICIDE(););
return flush_lock.release(lsn);
}
@@ -401,18 +401,27 @@ void mtr_t::commit()

std::pair<lsn_t,page_flush_ahead> lsns;

if (const auto len= prepare_write())
lsns= finish_write(len);
if (UNIV_LIKELY(m_log_mode == MTR_LOG_ALL))
{
lsns= do_write();

if (m_made_dirty)
mysql_mutex_lock(&log_sys.flush_order_mutex);

/* It is now safe to release log_sys.mutex because the
buf_pool.flush_order_mutex will ensure that we are the first one
to insert into buf_pool.flush_list. */
mysql_mutex_unlock(&log_sys.mutex);
}
else
{
ut_ad(m_log_mode == MTR_LOG_NO_REDO);
ut_ad(m_log.size() == 0);
m_commit_lsn= log_sys.get_lsn();
lsns= { m_commit_lsn, PAGE_FLUSH_NO };

if (m_made_dirty)
mysql_mutex_lock(&log_sys.flush_order_mutex);

/* It is now safe to release log_sys.mutex because the
buf_pool.flush_order_mutex will ensure that we are the first one
to insert into buf_pool.flush_list. */
mysql_mutex_unlock(&log_sys.mutex);
if (UNIV_UNLIKELY(m_made_dirty)) /* This should be IMPORT TABLESPACE */
mysql_mutex_lock(&log_sys.flush_order_mutex);
}

if (m_freed_pages)
{
@@ -515,7 +524,7 @@ void mtr_t::commit_shrink(fil_space_t &space)

log_write_and_flush_prepare();

const lsn_t start_lsn= finish_write(prepare_write()).first;
const lsn_t start_lsn= do_write().first;

mysql_mutex_lock(&log_sys.flush_order_mutex);
/* Durably write the reduced FSP_SIZE before truncating the data file. */
@@ -850,17 +859,10 @@ static mtr_t::page_flush_ahead log_close(lsn_t lsn) noexcept
return mtr_t::PAGE_FLUSH_SYNC;
}

inline size_t mtr_t::prepare_write()
std::pair<lsn_t,mtr_t::page_flush_ahead> mtr_t::do_write()
{
ut_ad(!recv_no_log_write);
if (UNIV_UNLIKELY(m_log_mode != MTR_LOG_ALL))
{
ut_ad(m_log_mode == MTR_LOG_NO_REDO);
ut_ad(m_log.size() == 0);
mysql_mutex_lock(&log_sys.mutex);
m_commit_lsn= log_sys.get_lsn();
return 0;
}
ut_ad(m_log_mode == MTR_LOG_ALL);

size_t len= m_log.size() + 5;
ut_ad(len > 5);
@@ -883,7 +885,7 @@ inline size_t mtr_t::prepare_write()
!m_user_space->max_lsn)
name_write();

return len;
return finish_write(len);
}

/** Write the mini-transaction log to the redo log buffer.