mirror of https://github.com/MariaDB/server.git (synced 2025-01-16 20:12:31 +01:00)

Merge (commit 0719df781e)
16 changed files with 10815 additions and 46 deletions

@@ -143,6 +143,7 @@ ANALYZE
"attached_condition": "(tbl2.b < 60)"
},
"buffer_type": "flat",
"buffer_size": "128Kb",
"join_type": "BNL",
"r_filtered": 100
}

@@ -180,6 +181,7 @@ ANALYZE
"attached_condition": "(tbl2.b < 60)"
},
"buffer_type": "flat",
"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "(tbl1.c > tbl2.c)",
"r_filtered": 15.833

mysql-test/r/analyze_stmt_privileges2.result (new file, 5234 lines)
File diff suppressed because it is too large

@@ -365,6 +365,7 @@ EXPLAIN
"attached_condition": "(tbl2.b < 5)"
},
"buffer_type": "flat",
"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "(tbl2.a = tbl1.a)"
}

@@ -627,6 +628,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
"buffer_size": "128Kb",
"join_type": "BNL"
}
}

@@ -660,6 +662,7 @@ EXPLAIN
"first_match": "t2"
},
"buffer_type": "flat",
"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "((t1.b = t2.b) and (t1.a = t2.a))"
}

@@ -696,6 +699,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "((t1.b = t2.b) and (t1.a = t2.a))"
}

@@ -808,6 +812,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "((t2.b <> outer_t1.a) and trigcond(((<cache>(outer_t1.a) = t1.a) or isnull(t1.a))))"
}

@@ -858,6 +863,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
"buffer_size": "128Kb",
"join_type": "BNL",
"attached_condition": "(tbl2.b = tbl1.b)"
}

mysql-test/r/explain_json_format_partitions.result (new file, 83 lines)

@@ -0,0 +1,83 @@
create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (
a int not null
) partition by key(a);
insert into t1 select a from t2;
explain partitions select * from t1 where a in (2,3,4);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0 ALL NULL NULL NULL NULL 10 Using where
explain format=json select * from t1 where a in (2,3,4);
EXPLAIN
{
"query_block": {
"select_id": 1,
"table": {
"table_name": "t1",
"partitions": ["p0"],
"access_type": "ALL",
"rows": 10,
"filtered": 100,
"attached_condition": "(t1.a in (2,3,4))"
}
}
}
analyze format=json select * from t1 where a in (2,3,4);
ANALYZE
{
"query_block": {
"select_id": 1,
"r_loops": 1,
"r_total_time_ms": "REPLACED",
"table": {
"table_name": "t1",
"partitions": ["p0"],
"access_type": "ALL",
"r_loops": 1,
"rows": 10,
"r_rows": 10,
"r_total_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": 30,
"attached_condition": "(t1.a in (2,3,4))"
}
}
}
analyze format=json update t1 set a=a+10 where a in (2,3,4);
ANALYZE
{
"query_block": {
"select_id": 1,
"table": {
"update": 1,
"table_name": "t1",
"partitions": ["p0"],
"access_type": "ALL",
"rows": 10,
"r_rows": 3,
"r_filtered": 100,
"using_io_buffer": 1,
"r_total_time_ms": "REPLACED",
"attached_condition": "(t1.a in (2,3,4))"
}
}
}
analyze format=json delete from t1 where a in (20,30,40);
ANALYZE
{
"query_block": {
"select_id": 1,
"table": {
"delete": 1,
"table_name": "t1",
"partitions": ["p0"],
"access_type": "ALL",
"rows": 10,
"r_rows": 10,
"r_filtered": 0,
"r_total_time_ms": "REPLACED",
"attached_condition": "(t1.a in (20,30,40))"
}
}
}
drop table t1,t2;

@@ -44,14 +44,14 @@ a
1
2
include/stop_slave.inc
START SLAVE UNTIL master_gtid_pos = "1-10-100,2-20-200";
START SLAVE UNTIL master_gtid_pos = "1-10-100,2-20-200,0-1-300";
include/wait_for_slave_to_start.inc
Using_Gtid = 'Current_Pos'
Until_Condition = 'Gtid'
INSERT INTO t1 VALUES (3);
DELETE FROM t1 WHERE a=3;
include/stop_slave.inc
include/start_slave.inc
*** Test UNTIL condition in an earlier binlog than the start GTID. ***
include/stop_slave.inc
SET gtid_domain_id = 1;
INSERT INTO t1 VALUES (3);
SET gtid_domain_id = 2;

@@ -73,19 +73,29 @@ SELECT * FROM t1 ORDER BY a;

# Test showing the UNTIL condition in SHOW SLAVE STATUS.
--source include/stop_slave.inc
START SLAVE UNTIL master_gtid_pos = "1-10-100,2-20-200";
START SLAVE UNTIL master_gtid_pos = "1-10-100,2-20-200,0-1-300";
--source include/wait_for_slave_to_start.inc
--let $status_items= Using_Gtid,Until_Condition
--source include/show_slave_status.inc

# Clear the UNTIL condition.
# Note that we need to wait for a transaction to get through from the master.
# Otherwise the IO thread may still be in get_master_version_and_clock()
# (wait_for_slave_to_start.inc returns as soon as the IO thread is connected),
# and we can get test failures from warnings in the log about IO thread being
# killed in the middle of setting @@gtid_strict_mode or similar (MDEV-7940).
--connection server_1
INSERT INTO t1 VALUES (3);
DELETE FROM t1 WHERE a=3;
--save_master_pos

--connection server_2
--sync_with_master
--source include/stop_slave.inc
--source include/start_slave.inc


--echo *** Test UNTIL condition in an earlier binlog than the start GTID. ***
--connection server_2
--source include/stop_slave.inc

--connection server_1
SET gtid_domain_id = 1;

mysql-test/t/analyze_stmt_privileges2.test (new file, 5400 lines)
File diff suppressed because it is too large

mysql-test/t/explain_json_format_partitions.test (new file, 17 lines)

@@ -0,0 +1,17 @@

--source include/have_partition.inc
create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t1 (
a int not null
) partition by key(a);
insert into t1 select a from t2;
explain partitions select * from t1 where a in (2,3,4);
explain format=json select * from t1 where a in (2,3,4);
--replace_regex /"r_total_time_ms": [0-9]*[.]?[0-9]*/"r_total_time_ms": "REPLACED"/
analyze format=json select * from t1 where a in (2,3,4);
--replace_regex /"r_total_time_ms": [0-9]*[.]?[0-9]*/"r_total_time_ms": "REPLACED"/
analyze format=json update t1 set a=a+10 where a in (2,3,4);
--replace_regex /"r_total_time_ms": [0-9]*[.]?[0-9]*/"r_total_time_ms": "REPLACED"/
analyze format=json delete from t1 where a in (20,30,40);
drop table t1,t2;

@@ -121,7 +121,8 @@ void Update_plan::save_explain_data_intern(MEM_ROOT *mem_root,
partition_info *part_info;
if ((part_info= table->part_info))
{
make_used_partitions_str(part_info, &explain->used_partitions);
make_used_partitions_str(mem_root, part_info, &explain->used_partitions,
explain->used_partitions_list);
explain->used_partitions_set= true;
}
else

@@ -37,6 +37,18 @@ Explain_query::Explain_query(THD *thd_arg, MEM_ROOT *root) :
{
}

static void print_json_array(Json_writer *writer,
const char *title, String_list &list)
{
List_iterator_fast<char> it(list);
const char *name;
writer->add_member(title).start_array();
while ((name= it++))
writer->add_str(name);
writer->end_array();
}



Explain_query::~Explain_query()
{

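For context, the print_json_array() helper added above replaces several hand-written "iterate the list, emit each name" loops later in this file (add_json_keyset(), the used_key_parts and ref blocks, Explain_quick_select::print_json()). Below is a minimal self-contained sketch of the same pattern, with std::vector<std::string> and a toy writer standing in for the server's String_list and Json_writer; both stand-ins are hypothetical, only the shape of the helper follows the diff.

#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for Json_writer: just enough to show the array-emitting pattern.
struct ToyJsonWriter
{
  bool first= true;
  void add_member(const std::string &name) { std::cout << '"' << name << "\": "; }
  void start_array()                       { std::cout << '['; first= true; }
  void add_str(const std::string &s)
  {
    if (!first)
      std::cout << ", ";
    std::cout << '"' << s << '"';
    first= false;
  }
  void end_array()                         { std::cout << "]\n"; }
};

// Same shape as the helper in the diff: name the member, then emit every
// list element as one array entry.
static void print_json_array(ToyJsonWriter *writer, const char *title,
                             const std::vector<std::string> &list)
{
  writer->add_member(title);
  writer->start_array();
  for (const std::string &name : list)
    writer->add_str(name);
  writer->end_array();
}

int main()
{
  ToyJsonWriter w;
  std::vector<std::string> partitions= {"p0", "p1"};
  print_json_array(&w, "partitions", partitions);  // prints "partitions": ["p0", "p1"]
  return 0;
}
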
@@ -1276,17 +1288,9 @@ void add_json_keyset(Json_writer *writer, const char *elem_name,
String_list *keyset)
{
if (!keyset->is_empty())
{
List_iterator_fast<char> it(*keyset);
const char *name;
writer->add_member(elem_name).start_array();
while ((name= it++))
writer->add_str(name);
writer->end_array();
}
print_json_array(writer, elem_name, *keyset);
}


/*
@param fs_tracker Normally NULL. When not NULL, it means that the join tab
used filesort.

@@ -1345,7 +1349,10 @@ void Explain_table_access::print_explain_json(Explain_query *query,
writer->add_member("table").start_object();

writer->add_member("table_name").add_str(table_name);
// partitions

if (used_partitions_set)
print_json_array(writer, "partitions", used_partitions_list);

writer->add_member("access_type").add_str(join_type_str[type]);

add_json_keyset(writer, "possible_keys", &possible_keys);

@@ -1374,14 +1381,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
parts_list= &key.key_parts_list;

if (parts_list && !parts_list->is_empty())
{
List_iterator_fast<char> it(*parts_list);
const char *name;
writer->add_member("used_key_parts").start_array();
while ((name= it++))
writer->add_str(name);
writer->end_array();
}
print_json_array(writer, "used_key_parts", *parts_list);

if (quick_info && !quick_info->is_basic())
{

@@ -1392,14 +1392,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,

/* `ref` */
if (!ref_list.is_empty())
{
List_iterator_fast<char> it(ref_list);
const char *str;
writer->add_member("ref").start_array();
while ((str= it++))
writer->add_str(str);
writer->end_array();
}
print_json_array(writer, "ref", ref_list);

/* r_loops (not present in tabular output) */
if (is_analyze)

@@ -1480,6 +1473,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
writer->end_object(); // "block-nl-join"
writer->add_member("buffer_type").add_str(bka_type.incremental?
"incremental":"flat");
writer->add_member("buffer_size").add_size(bka_type.join_buffer_size);
writer->add_member("join_type").add_str(bka_type.join_alg);
if (bka_type.mrr_type.length())
writer->add_member("mrr_type").add_str(bka_type.mrr_type);

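The buffer_size member added here is what produces the "buffer_size": "128Kb" entries in the .result hunks above: add_size() renders EXPLAIN_BKA_TYPE::join_buffer_size as a compact unit, and 128Kb corresponds to 131072 bytes. A rough illustration of that kind of formatting follows, written as a hypothetical standalone helper rather than the server's Json_writer::add_size().

#include <cstdio>

// Hypothetical formatter mimicking the style seen in the test output ("128Kb"):
// use Kb/Mb when the byte count divides evenly, otherwise print raw bytes.
static void format_size(char *buf, std::size_t buf_len, unsigned long long bytes)
{
  const unsigned long long kb= 1024, mb= 1024 * 1024;
  if (bytes >= mb && bytes % mb == 0)
    std::snprintf(buf, buf_len, "%lluMb", bytes / mb);
  else if (bytes >= kb && bytes % kb == 0)
    std::snprintf(buf, buf_len, "%lluKb", bytes / kb);
  else
    std::snprintf(buf, buf_len, "%llu", bytes);
}

int main()
{
  char buf[32];
  format_size(buf, sizeof(buf), 131072);  // 128 * 1024 bytes
  std::printf("%s\n", buf);               // prints: 128Kb
  return 0;
}
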
@@ -1674,12 +1668,7 @@ void Explain_quick_select::print_json(Json_writer *writer)

writer->add_member("key").add_str(range.get_key_name());

List_iterator_fast<char> it(range.key_parts_list);
const char *name;
writer->add_member("used_key_parts").start_array();
while ((name= it++))
writer->add_str(name);
writer->end_array();
print_json_array(writer, "used_key_parts", range.key_parts_list);

writer->end_object();
}

@@ -1990,6 +1979,10 @@ void Explain_update::print_explain_json(Explain_query *query,
writer->add_member("delete").add_ll(1);

writer->add_member("table_name").add_str(table_name);

if (used_partitions_set)
print_json_array(writer, "partitions", used_partitions_list);

writer->add_member("access_type").add_str(join_type_str[jtype]);

if (!possible_keys.is_empty())

@@ -51,6 +51,9 @@ it into the slow query log.

*/

#ifndef SQL_EXPLAIN_INCLUDED
#define SQL_EXPLAIN_INCLUDED

class String_list: public List<char>
{
public:

@@ -471,6 +474,8 @@ class EXPLAIN_BKA_TYPE
public:
EXPLAIN_BKA_TYPE() : join_alg(NULL) {}

size_t join_buffer_size;

bool incremental;

/*

@@ -609,6 +614,7 @@ public:
/* id and 'select_type' are cared-of by the parent Explain_select */
StringBuffer<32> table_name;
StringBuffer<32> used_partitions;
String_list used_partitions_list;
// valid with ET_USING_MRR
StringBuffer<32> mrr_type;
StringBuffer<32> firstmatch_table_name;

@@ -732,6 +738,7 @@ public:
const char *select_type;

StringBuffer<32> used_partitions;
String_list used_partitions_list;
bool used_partitions_set;

bool impossible_where;

@@ -842,3 +849,4 @@ public:
};


#endif //SQL_EXPLAIN_INCLUDED

@@ -2582,6 +2582,8 @@ void JOIN_CACHE::save_explain_data(EXPLAIN_BKA_TYPE *explain)
{
explain->incremental= MY_TEST(prev_cache);

explain->join_buffer_size= get_join_buffer_size();

switch (get_join_alg()) {
case BNL_JOIN_ALG:
explain->join_alg= "BNL";

@@ -68,6 +68,7 @@
// mysql_*_alter_copy_data
#include "opt_range.h" // store_key_image_to_rec
#include "sql_alter.h" // Alter_table_ctx
#include "sql_select.h"

#include <algorithm>
using std::max;

@@ -7290,8 +7291,10 @@ void mem_alloc_error(size_t size)
/**
Return comma-separated list of used partitions in the provided given string.

@param mem_root Where to allocate following list
@param part_info Partitioning info
@param[out] parts The resulting list of string to fill
@param[out] used_partitions_list result list to fill

Generate a list of used partitions (from bits in part_info->read_partitions
bitmap), and store it into the provided String object.

@@ -7302,7 +7305,10 @@ void mem_alloc_error(size_t size)
that was written or locked.
*/

void make_used_partitions_str(partition_info *part_info, String *parts_str)
void make_used_partitions_str(MEM_ROOT *alloc,
partition_info *part_info,
String *parts_str,
String_list &used_partitions_list)
{
parts_str->length(0);
partition_element *pe;

@@ -7321,6 +7327,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
{
if (parts_str->length())
parts_str->append(',');
uint index= parts_str->length();
parts_str->append(head_pe->partition_name,
strlen(head_pe->partition_name),
system_charset_info);

@@ -7328,6 +7335,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
parts_str->append(pe->partition_name,
strlen(pe->partition_name),
system_charset_info);
used_partitions_list.append_str(alloc, parts_str->ptr() + index);
}
partition_id++;
}

@@ -7341,6 +7349,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
{
if (parts_str->length())
parts_str->append(',');
used_partitions_list.append_str(alloc, pe->partition_name);
parts_str->append(pe->partition_name, strlen(pe->partition_name),
system_charset_info);
}

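With the new signature, make_used_partitions_str() fills two outputs in one pass over the used partitions: the comma-separated parts_str shown in the tabular EXPLAIN "partitions" column, and used_partitions_list, whose entries are copied onto the caller-supplied MEM_ROOT and later emitted by print_json_array() as the JSON "partitions" array. A self-contained sketch of that dual-output idea follows, with standard containers standing in for String, String_list and MEM_ROOT; the stand-ins are hypothetical, only the shape is taken from the diff.

#include <iostream>
#include <string>
#include <vector>

// Build both outputs from one pass over the used partition names:
//  - parts_str:  "p0,p2,p3"        (tabular EXPLAIN "partitions" column)
//  - parts_list: {"p0","p2","p3"}  (JSON "partitions": [...] array)
static void make_used_partitions_sketch(const std::vector<std::string> &used,
                                        std::string *parts_str,
                                        std::vector<std::string> *parts_list)
{
  parts_str->clear();
  for (const std::string &name : used)
  {
    if (!parts_str->empty())
      parts_str->append(",");
    parts_str->append(name);
    parts_list->push_back(name);  // the real code copies the name onto a MEM_ROOT
  }
}

int main()
{
  std::string parts_str;
  std::vector<std::string> parts_list;
  make_used_partitions_sketch({"p0", "p2", "p3"}, &parts_str, &parts_list);
  std::cout << parts_str << "\n";          // p0,p2,p3
  std::cout << parts_list.size() << "\n";  // 3
  return 0;
}
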
@@ -74,6 +74,7 @@ typedef struct {
uint32 end_part;
} part_id_range;

class String_list;
struct st_partition_iter;
#define NOT_A_PARTITION_ID UINT_MAX32

@@ -114,7 +115,9 @@ bool mysql_unpack_partition(THD *thd, char *part_buf,
TABLE *table, bool is_create_table_ind,
handlerton *default_db_type,
bool *work_part_info_used);
void make_used_partitions_str(partition_info *part_info, String *parts_str);
void make_used_partitions_str(MEM_ROOT *mem_root,
partition_info *part_info, String *parts_str,
String_list &used_partitions_list);
uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
bool left_endpoint,
bool include_endpoint);

@@ -23503,8 +23503,9 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
partition_info *part_info;
if (!table->derived_select_number &&
(part_info= table->part_info))
{
make_used_partitions_str(part_info, &eta->used_partitions);
{ //TODO: all thd->mem_root here should be fixed
make_used_partitions_str(thd->mem_root, part_info, &eta->used_partitions,
eta->used_partitions_list);
eta->used_partitions_set= true;
}
else

@@ -517,7 +517,7 @@ int mysql_update(THD *thd,
*/
if (thd->lex->describe)
goto produce_explain_and_leave;
explain= query_plan.save_explain_update_data(thd->mem_root, thd);
explain= query_plan.save_explain_update_data(query_plan.mem_root, thd);

ANALYZE_START_TRACKING(&explain->command_tracker);

@@ -1052,7 +1052,7 @@ produce_explain_and_leave:
We come here for various "degenerate" query plans: impossible WHERE,
no-partitions-used, impossible-range, etc.
*/
query_plan.save_explain_update_data(thd->mem_root, thd);
query_plan.save_explain_update_data(query_plan.mem_root, thd);

emit_explain_and_leave:
int err2= thd->lex->explain->send_explain(thd);