Mirror of https://github.com/MariaDB/server.git (synced 2025-04-13 02:35:32 +02:00)

Commit 3d5f97fd70: Merge ../10.1-explain-json-r4 into 10.1

21 changed files with 1335 additions and 155 deletions
@ -100,6 +100,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
|
|||
../sql/rpl_reporting.cc
|
||||
../sql/sql_expression_cache.cc
|
||||
../sql/my_apc.cc ../sql/my_apc.h
|
||||
../sql/my_json_writer.cc ../sql/my_json_writer.h
|
||||
../sql/rpl_gtid.cc
|
||||
../sql/sql_explain.cc ../sql/sql_explain.h
|
||||
../sql/compat56.cc
|
||||
|
|
178  mysql-test/r/explain_json.result  Normal file
|
@ -0,0 +1,178 @@
|
|||
drop table if exists t0,t1;
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
explain format=json select * from t0;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"table": {
|
||||
"table_name": "t0",
|
||||
"access_type": "ALL",
|
||||
"rows": 10,
|
||||
"filtered": 100
|
||||
}
|
||||
}
|
||||
}
|
||||
explain format=json select * from t0 where 1>2;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"table": {
|
||||
"message": "Impossible WHERE"
|
||||
}
|
||||
}
|
||||
}
|
||||
explain format=json select * from t0 where a<3;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"table": {
|
||||
"table_name": "t0",
|
||||
"access_type": "ALL",
|
||||
"rows": 10,
|
||||
"filtered": 100,
|
||||
"attached_condition": "(t0.a < 3)"
|
||||
}
|
||||
}
|
||||
}
|
||||
# Try a basic join
|
||||
create table t1 (a int, b int, filler char(32), key(a));
|
||||
insert into t1
|
||||
select
|
||||
a.a + b.a* 10 + c.a * 100,
|
||||
a.a + b.a* 10 + c.a * 100,
|
||||
'filler'
|
||||
from t0 a, t0 b, t0 c;
|
||||
explain format=json select * from t0,t1 where t1.a=t0.a;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"table": {
|
||||
"table_name": "t0",
|
||||
"access_type": "ALL",
|
||||
"rows": 10,
|
||||
"filtered": 100,
|
||||
"attached_condition": "(t0.a is not null)"
|
||||
},
|
||||
"table": {
|
||||
"table_name": "t1",
|
||||
"access_type": "ref",
|
||||
"possible_keys": ["a"],
|
||||
"key": "a",
|
||||
"key_length": "5",
|
||||
"used_key_parts": ["a"],
|
||||
"ref": ["test.t0.a"],
|
||||
"rows": 1,
|
||||
"filtered": 100
|
||||
}
|
||||
}
|
||||
}
|
||||
# Try range and index_merge
|
||||
create table t2 (a1 int, a2 int, b1 int, b2 int, key(a1,a2), key(b1,b2));
|
||||
insert into t2 select a,a,a,a from t1;
|
||||
explain format=json select * from t2 where a1<5;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"table": {
|
||||
"table_name": "t2",
|
||||
"access_type": "range",
|
||||
"possible_keys": ["a1"],
|
||||
"key": "a1",
|
||||
"key_length": "5",
|
||||
"used_key_parts": ["a1"],
|
||||
"rows": 5,
|
||||
"filtered": 100,
|
||||
"index_condition": "(t2.a1 < 5)"
|
||||
}
|
||||
}
|
||||
}
|
||||
explain format=json select * from t2 where a1=1 or b1=2;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"table": {
|
||||
"table_name": "t2",
|
||||
"access_type": "index_merge",
|
||||
"possible_keys": ["a1", "b1"],
|
||||
"key_length": "5,5",
|
||||
"index_merge": {
|
||||
"sort_union": {
|
||||
"range": {
|
||||
"key": "a1",
|
||||
"used_key_parts": ["a1"]
|
||||
},
|
||||
"range": {
|
||||
"key": "b1",
|
||||
"used_key_parts": ["b1"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"rows": 2,
|
||||
"filtered": 100,
|
||||
"attached_condition": "((t2.a1 = 1) or (t2.b1 = 2))"
|
||||
}
|
||||
}
|
||||
}
|
||||
explain format=json select * from t2 where a1=1 or (b1=2 and b2=3);
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"table": {
|
||||
"table_name": "t2",
|
||||
"access_type": "index_merge",
|
||||
"possible_keys": ["a1", "b1"],
|
||||
"key_length": "5,10",
|
||||
"index_merge": {
|
||||
"sort_union": {
|
||||
"range": {
|
||||
"key": "a1",
|
||||
"used_key_parts": ["a1"]
|
||||
},
|
||||
"range": {
|
||||
"key": "b1",
|
||||
"used_key_parts": ["b1", "b2"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"rows": 2,
|
||||
"filtered": 100,
|
||||
"attached_condition": "((t2.a1 = 1) or ((t2.b1 = 2) and (t2.b2 = 3)))"
|
||||
}
|
||||
}
|
||||
}
|
||||
# Try ref access on two key components
|
||||
explain format=json select * from t0,t2 where t2.b1=t0.a and t2.b2=4;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"table": {
|
||||
"table_name": "t0",
|
||||
"access_type": "ALL",
|
||||
"rows": 10,
|
||||
"filtered": 100,
|
||||
"attached_condition": "(t0.a is not null)"
|
||||
},
|
||||
"table": {
|
||||
"table_name": "t2",
|
||||
"access_type": "ref",
|
||||
"possible_keys": ["b1"],
|
||||
"key": "b1",
|
||||
"key_length": "10",
|
||||
"used_key_parts": ["b1", "b2"],
|
||||
"ref": ["test.t0.a", "const"],
|
||||
"rows": 1,
|
||||
"filtered": 100
|
||||
}
|
||||
}
|
||||
}
|
||||
drop table t1,t2;
|
||||
drop table t0;
|
42  mysql-test/t/explain_json.test  Normal file
|
@ -0,0 +1,42 @@
|
|||
#
|
||||
# EXPLAIN FORMAT=JSON tests. These are tests developed for MariaDB.
|
||||
#
|
||||
--disable_warnings
|
||||
drop table if exists t0,t1;
|
||||
--enable_warnings
|
||||
|
||||
create table t0(a int);
|
||||
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
|
||||
explain format=json select * from t0;
|
||||
|
||||
explain format=json select * from t0 where 1>2;
|
||||
|
||||
explain format=json select * from t0 where a<3;
|
||||
|
||||
--echo # Try a basic join
|
||||
create table t1 (a int, b int, filler char(32), key(a));
|
||||
insert into t1
|
||||
select
|
||||
a.a + b.a* 10 + c.a * 100,
|
||||
a.a + b.a* 10 + c.a * 100,
|
||||
'filler'
|
||||
from t0 a, t0 b, t0 c;
|
||||
|
||||
explain format=json select * from t0,t1 where t1.a=t0.a;
|
||||
|
||||
--echo # Try range and index_merge
|
||||
create table t2 (a1 int, a2 int, b1 int, b2 int, key(a1,a2), key(b1,b2));
|
||||
insert into t2 select a,a,a,a from t1;
|
||||
|
||||
explain format=json select * from t2 where a1<5;
|
||||
|
||||
explain format=json select * from t2 where a1=1 or b1=2;
|
||||
explain format=json select * from t2 where a1=1 or (b1=2 and b2=3);
|
||||
|
||||
--echo # Try ref access on two key components
|
||||
|
||||
explain format=json select * from t0,t2 where t2.b1=t0.a and t2.b2=4;
|
||||
|
||||
drop table t1,t2;
|
||||
drop table t0;
|
|
@ -112,6 +112,7 @@ SET (SQL_SOURCE
|
|||
threadpool_common.cc
|
||||
../sql-common/mysql_async.c
|
||||
my_apc.cc my_apc.h
|
||||
my_json_writer.cc my_json_writer.h
|
||||
rpl_gtid.cc rpl_parallel.cc
|
||||
${WSREP_SOURCES}
|
||||
table_cache.cc
|
||||
|
|
|
@ -2504,7 +2504,13 @@ void Item_ident::print(String *str, enum_query_type query_type)
|
|||
}
|
||||
if (db_name && db_name[0] && !alias_name_used)
|
||||
{
|
||||
if (!(cached_table && cached_table->belong_to_view &&
|
||||
/*
|
||||
When printing EXPLAIN, don't print database name when it's the same as
|
||||
current database.
|
||||
*/
|
||||
bool skip_db= (query_type & QT_EXPLAIN) && !strcmp(thd->db, db_name);
|
||||
if (!skip_db &&
|
||||
!(cached_table && cached_table->belong_to_view &&
|
||||
cached_table->belong_to_view->compact_view_format))
|
||||
{
|
||||
append_identifier(thd, str, d_name, (uint)strlen(d_name));
|
||||
|
|
|
@ -242,6 +242,7 @@ static SYMBOL symbols[] = {
|
|||
{ "FOR", SYM(FOR_SYM)},
|
||||
{ "FORCE", SYM(FORCE_SYM)},
|
||||
{ "FOREIGN", SYM(FOREIGN)},
|
||||
{ "FORMAT", SYM(FORMAT_SYM)},
|
||||
{ "FOUND", SYM(FOUND_SYM)},
|
||||
{ "FROM", SYM(FROM)},
|
||||
{ "FULL", SYM(FULL)},
|
||||
|
|
310  sql/my_json_writer.cc  Normal file
|
@ -0,0 +1,310 @@
|
|||
/* Todo: SkySQL copyrights */
|
||||
|
||||
#include <my_global.h>
|
||||
#include "sql_priv.h"
|
||||
#include "sql_string.h"
|
||||
|
||||
#include "my_json_writer.h"
|
||||
|
||||
void Json_writer::append_indent()
|
||||
{
|
||||
if (!document_start)
|
||||
output.append('\n');
|
||||
for (int i=0; i< indent_level; i++)
|
||||
output.append(' ');
|
||||
}
|
||||
|
||||
void Json_writer::start_object()
|
||||
{
|
||||
fmt_helper.on_start_object();
|
||||
|
||||
if (!element_started)
|
||||
start_element();
|
||||
|
||||
output.append("{");
|
||||
indent_level+=INDENT_SIZE;
|
||||
first_child=true;
|
||||
element_started= false;
|
||||
document_start= false;
|
||||
}
|
||||
|
||||
void Json_writer::start_array()
|
||||
{
|
||||
if (fmt_helper.on_start_array())
|
||||
return;
|
||||
|
||||
if (!element_started)
|
||||
start_element();
|
||||
|
||||
output.append("[");
|
||||
indent_level+=INDENT_SIZE;
|
||||
first_child=true;
|
||||
element_started= false;
|
||||
document_start= false;
|
||||
}
|
||||
|
||||
|
||||
void Json_writer::end_object()
|
||||
{
|
||||
indent_level-=INDENT_SIZE;
|
||||
if (!first_child)
|
||||
append_indent();
|
||||
output.append("}");
|
||||
}
|
||||
|
||||
|
||||
void Json_writer::end_array()
|
||||
{
|
||||
if (fmt_helper.on_end_array())
|
||||
return;
|
||||
indent_level-=INDENT_SIZE;
|
||||
if (!first_child)
|
||||
append_indent();
|
||||
output.append("]");
|
||||
}
|
||||
|
||||
|
||||
Json_writer& Json_writer::add_member(const char *name)
|
||||
{
|
||||
if (fmt_helper.on_add_member(name))
|
||||
return *this; // handled
|
||||
|
||||
// assert that we are in an object
|
||||
DBUG_ASSERT(!element_started);
|
||||
start_element();
|
||||
|
||||
output.append('"');
|
||||
output.append(name);
|
||||
output.append("\": ");
|
||||
return *this;
|
||||
}
|
||||
|
||||
/* Used by formatting helper to print something that is formatted by the helper. */
|
||||
void Json_writer::start_sub_element()
|
||||
{
|
||||
//element_started= true;
|
||||
if (first_child)
|
||||
first_child= false;
|
||||
else
|
||||
output.append(',');
|
||||
|
||||
append_indent();
|
||||
}
|
||||
|
||||
|
||||
void Json_writer::start_element()
|
||||
{
|
||||
element_started= true;
|
||||
|
||||
if (first_child)
|
||||
first_child= false;
|
||||
else
|
||||
output.append(',');
|
||||
|
||||
append_indent();
|
||||
}
|
||||
|
||||
void Json_writer::add_ll(longlong val)
|
||||
{
|
||||
char buf[64];
|
||||
my_snprintf(buf, sizeof(buf), "%lld", val); // val is a longlong, so use %lld
|
||||
add_unquoted_str(buf);
|
||||
}
|
||||
|
||||
|
||||
void Json_writer::add_double(double val)
|
||||
{
|
||||
char buf[64];
|
||||
my_snprintf(buf, sizeof(buf), "%lg", val);
|
||||
add_unquoted_str(buf);
|
||||
}
|
||||
|
||||
|
||||
void Json_writer::add_bool(bool val)
|
||||
{
|
||||
add_unquoted_str(val? "true" : "false");
|
||||
}
|
||||
|
||||
|
||||
void Json_writer::add_unquoted_str(const char* str)
|
||||
{
|
||||
if (fmt_helper.on_add_str(str))
|
||||
return;
|
||||
|
||||
if (!element_started)
|
||||
start_element();
|
||||
|
||||
output.append(str);
|
||||
element_started= false;
|
||||
}
|
||||
|
||||
|
||||
void Json_writer::add_str(const char *str)
|
||||
{
|
||||
if (fmt_helper.on_add_str(str))
|
||||
return;
|
||||
|
||||
if (!element_started)
|
||||
start_element();
|
||||
|
||||
output.append('"');
|
||||
output.append(str);
|
||||
output.append('"');
|
||||
element_started= false;
|
||||
}
|
||||
|
||||
|
||||
void Json_writer::add_str(const String &str)
|
||||
{
|
||||
add_str(str.ptr());
|
||||
}
|
||||
|
||||
|
||||
bool Single_line_formatting_helper::on_add_member(const char *name)
|
||||
{
|
||||
DBUG_ASSERT(state== INACTIVE || state == DISABLED);
|
||||
if (state != DISABLED)
|
||||
{
|
||||
// remove everything from the array
|
||||
buf_ptr= buffer;
|
||||
|
||||
//append member name to the array
|
||||
size_t len= strlen(name);
|
||||
if (len < MAX_LINE_LEN)
|
||||
{
|
||||
memcpy(buf_ptr, name, len);
|
||||
buf_ptr+=len;
|
||||
*(buf_ptr++)= 0;
|
||||
|
||||
line_len= owner->indent_level + len + 1;
|
||||
state= ADD_MEMBER;
|
||||
return true; // handled
|
||||
}
|
||||
}
|
||||
return false; // not handled
|
||||
}
|
||||
|
||||
bool Single_line_formatting_helper::on_start_array()
|
||||
{
|
||||
if (state == ADD_MEMBER)
|
||||
{
|
||||
state= IN_ARRAY;
|
||||
return true; // handled
|
||||
}
|
||||
else
|
||||
{
|
||||
state= INACTIVE;
|
||||
// TODO: what if we have accumulated some stuff already? shouldn't we
|
||||
// flush it?
|
||||
return false; // not handled
|
||||
}
|
||||
}
|
||||
|
||||
bool Single_line_formatting_helper::on_end_array()
|
||||
{
|
||||
if (state == IN_ARRAY)
|
||||
{
|
||||
flush_on_one_line();
|
||||
state= INACTIVE;
|
||||
return true; // handled
|
||||
}
|
||||
return false; // not handled
|
||||
}
|
||||
|
||||
void Single_line_formatting_helper::on_start_object()
|
||||
{
|
||||
// Nested objects will not be printed on one line
|
||||
disable_and_flush();
|
||||
}
|
||||
|
||||
bool Single_line_formatting_helper::on_add_str(const char *str)
|
||||
{
|
||||
if (state == IN_ARRAY)
|
||||
{
|
||||
size_t len= strlen(str);
|
||||
|
||||
// New length will be:
|
||||
// "$string",
|
||||
// quote + quote + comma + space = 4
|
||||
if (line_len + len + 4 > MAX_LINE_LEN)
|
||||
{
|
||||
disable_and_flush();
|
||||
return false; // didn't handle the last element
|
||||
}
|
||||
|
||||
//append string to array
|
||||
memcpy(buf_ptr, str, len);
|
||||
buf_ptr+=len;
|
||||
*(buf_ptr++)= 0;
|
||||
line_len += len + 4;
|
||||
return true; // handled
|
||||
}
|
||||
|
||||
disable_and_flush();
|
||||
return false; // not handled
|
||||
}
|
||||
|
||||
void Single_line_formatting_helper::flush_on_one_line()
|
||||
{
|
||||
// append everything to output on one line
|
||||
owner->start_sub_element();
|
||||
char *ptr= buffer;
|
||||
int nr= 0;
|
||||
while (ptr < buf_ptr)
|
||||
{
|
||||
char *str= ptr;
|
||||
|
||||
if (nr == 0)
|
||||
{
|
||||
owner->output.append('"');
|
||||
owner->output.append(str);
|
||||
owner->output.append("\": ");
|
||||
owner->output.append('[');
|
||||
}
|
||||
else
|
||||
{
|
||||
if (nr != 1)
|
||||
owner->output.append(", ");
|
||||
owner->output.append('"');
|
||||
owner->output.append(str);
|
||||
owner->output.append('"');
|
||||
}
|
||||
nr++;
|
||||
|
||||
while (*ptr!=0)
|
||||
ptr++;
|
||||
ptr++;
|
||||
}
|
||||
owner->output.append(']');
|
||||
}
|
||||
|
||||
|
||||
void Single_line_formatting_helper::disable_and_flush()
|
||||
{
|
||||
state= DISABLED;
|
||||
// deactivate ourselves and flush all accumulated calls.
|
||||
char *ptr= buffer;
|
||||
int nr= 0;
|
||||
while (ptr < buf_ptr)
|
||||
{
|
||||
char *str= ptr;
|
||||
if (nr == 0)
|
||||
{
|
||||
owner->add_member(str);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (nr == 1)
|
||||
owner->start_array();
|
||||
owner->add_str(str);
|
||||
}
|
||||
|
||||
nr++;
|
||||
while (*ptr!=0)
|
||||
ptr++;
|
||||
ptr++;
|
||||
}
|
||||
buf_ptr= buffer;
|
||||
state= INACTIVE;
|
||||
}
|
||||
|
118  sql/my_json_writer.h  Normal file
|
@ -0,0 +1,118 @@
|
|||
/* Todo: SkySQL copyrights */
|
||||
|
||||
class Json_writer;
|
||||
|
||||
/*
|
||||
The idea is to catch arrays that can be printed on one line:
|
||||
|
||||
arrayName : [ "boo", 123, 456 ]
|
||||
|
||||
and actually print them on one line. Arrays that occupy too much space on
the line, or have nested members, cannot be printed on one line.
|
||||
|
||||
We hook into JSON printing functions and try to detect the pattern. While
|
||||
detecting the pattern, we will accumulate "boo", 123, 456 as strings.
|
||||
|
||||
Then,
|
||||
- either the pattern is broken, and we print the elements out,
|
||||
- or the pattern lasts till the end of the array, and we print the
|
||||
array on one line.
|
||||
|
||||
TODO:
|
||||
fix the quoting: if we start to accumulate an array but it then grows
too large to be printed on one line, the elements will be printed as
strings (even if some of them were initially numbers).
|
||||
*/
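/*
  Illustration only (not part of the class): with this helper, the calls

    writer.add_member("possible_keys").start_array();
    writer.add_str("a1");
    writer.add_str("b1");
    writer.end_array();

  come out as the single line seen in explain_json.result,

    "possible_keys": ["a1", "b1"]

  instead of one array element per line.
*/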
|
||||
|
||||
class Single_line_formatting_helper
|
||||
{
|
||||
enum enum_state
|
||||
{
|
||||
INACTIVE,
|
||||
ADD_MEMBER,
|
||||
IN_ARRAY,
|
||||
DISABLED
|
||||
};
|
||||
|
||||
enum enum_state state;
|
||||
enum { MAX_LINE_LEN= 80 };
|
||||
char buffer[80];
|
||||
char *buf_ptr;
|
||||
uint line_len;
|
||||
|
||||
Json_writer *owner;
|
||||
public:
|
||||
Single_line_formatting_helper() : state(INACTIVE), buf_ptr(buffer) {}
|
||||
|
||||
void init(Json_writer *owner_arg) { owner= owner_arg; }
|
||||
|
||||
bool on_add_member(const char *name);
|
||||
|
||||
bool on_start_array();
|
||||
bool on_end_array();
|
||||
void on_start_object();
|
||||
// on_end_object() is not needed.
|
||||
|
||||
bool on_add_str(const char *str);
|
||||
|
||||
void flush_on_one_line();
|
||||
void disable_and_flush();
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
A class to write well-formed JSON documents. The documents are also formatted
|
||||
for human readability.
|
||||
*/
|
||||
|
||||
class Json_writer
|
||||
{
|
||||
public:
|
||||
/* Add a member. We must be in an object. */
|
||||
Json_writer& add_member(const char *name);
|
||||
|
||||
/* Add atomic values */
|
||||
void add_str(const char* val);
|
||||
void add_str(const String &str);
|
||||
|
||||
void add_ll(longlong val);
|
||||
void add_double(double val);
|
||||
void add_bool(bool val);
|
||||
|
||||
private:
|
||||
void add_unquoted_str(const char* val);
|
||||
public:
|
||||
/* Start a child object */
|
||||
void start_object();
|
||||
void start_array();
|
||||
|
||||
void end_object();
|
||||
void end_array();
|
||||
|
||||
Json_writer() :
|
||||
indent_level(0), document_start(true), element_started(false),
|
||||
first_child(true)
|
||||
{
|
||||
fmt_helper.init(this);
|
||||
}
|
||||
private:
|
||||
// TODO: a stack of (name, bool is_object_or_array) elements.
|
||||
int indent_level;
|
||||
enum { INDENT_SIZE = 2 };
|
||||
|
||||
friend class Single_line_formatting_helper;
|
||||
bool document_start;
|
||||
bool element_started;
|
||||
bool first_child;
|
||||
|
||||
Single_line_formatting_helper fmt_helper;
|
||||
|
||||
void append_indent();
|
||||
void start_element();
|
||||
void start_sub_element();
|
||||
|
||||
//const char *new_member_name;
|
||||
public:
|
||||
String output;
|
||||
};
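/*
  A minimal usage sketch (illustration, not part of the patch): produce the
  kind of document shown in explain_json.result for a full table scan. The
  member names are the ones Explain_select / Explain_table_access emit
  elsewhere in this patch.

    Json_writer writer;
    writer.start_object();
    writer.add_member("query_block").start_object();
    writer.add_member("select_id").add_ll(1);
    writer.add_member("table").start_object();
    writer.add_member("table_name").add_str("t0");
    writer.add_member("access_type").add_str("ALL");
    writer.add_member("rows").add_ll(10);
    writer.add_member("filtered").add_double(100);
    writer.end_object();   // table
    writer.end_object();   // query_block
    writer.end_object();   // document
    // writer.output (a String) now holds the formatted JSON text.
*/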
|
||||
|
|
@ -611,9 +611,13 @@ enum enum_query_type
|
|||
/// Without character set introducers.
|
||||
QT_WITHOUT_INTRODUCERS= (1 << 1),
|
||||
/// view internal representation (like QT_ORDINARY except ORDER BY clause)
|
||||
QT_VIEW_INTERNAL= (1 << 2)
|
||||
QT_VIEW_INTERNAL= (1 << 2),
|
||||
/// This value means focus on readability, not on ability to parse back, etc.
|
||||
QT_EXPLAIN= (1 << 4)
|
||||
};
|
||||
|
||||
|
||||
|
||||
/* query_id */
|
||||
typedef int64 query_id_t;
|
||||
extern query_id_t global_query_id;
|
||||
|
|
|
@ -12233,7 +12233,7 @@ Explain_quick_select* QUICK_RANGE_SELECT::get_explain(MEM_ROOT *alloc)
|
|||
{
|
||||
Explain_quick_select *res;
|
||||
if ((res= new (alloc) Explain_quick_select(QS_TYPE_RANGE)))
|
||||
res->range.set(alloc, head->key_info[index].name, max_used_key_length);
|
||||
res->range.set(alloc, &head->key_info[index], max_used_key_length);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -12242,7 +12242,7 @@ Explain_quick_select* QUICK_GROUP_MIN_MAX_SELECT::get_explain(MEM_ROOT *alloc)
|
|||
{
|
||||
Explain_quick_select *res;
|
||||
if ((res= new (alloc) Explain_quick_select(QS_TYPE_GROUP_MIN_MAX)))
|
||||
res->range.set(alloc, head->key_info[index].name, max_used_key_length);
|
||||
res->range.set(alloc, &head->key_info[index], max_used_key_length);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
|
|
@ -2370,7 +2370,11 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length)
|
|||
int THD::send_explain_fields(select_result *result, uint8 explain_flags, bool is_analyze)
|
||||
{
|
||||
List<Item> field_list;
|
||||
make_explain_field_list(field_list, explain_flags, is_analyze);
|
||||
if (lex->explain_json)
|
||||
make_explain_json_field_list(field_list);
|
||||
else
|
||||
make_explain_field_list(field_list, explain_flags, is_analyze);
|
||||
|
||||
result->prepare(field_list, NULL);
|
||||
return (result->send_result_set_metadata(field_list,
|
||||
Protocol::SEND_NUM_ROWS |
|
||||
|
@ -2378,6 +2382,13 @@ int THD::send_explain_fields(select_result *result, uint8 explain_flags, bool is
|
|||
}
|
||||
|
||||
|
||||
void THD::make_explain_json_field_list(List<Item> &field_list)
|
||||
{
|
||||
Item *item= new Item_empty_string("EXPLAIN", 78, system_charset_info);
|
||||
field_list.push_back(item);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Populate the provided field_list with EXPLAIN output columns.
|
||||
this->lex->describe has the EXPLAIN flags
|
||||
|
|
|
@ -3087,6 +3087,8 @@ public:
|
|||
bool is_analyze);
|
||||
void make_explain_field_list(List<Item> &field_list, uint8 explain_flags,
|
||||
bool is_analyze);
|
||||
void make_explain_json_field_list(List<Item> &field_list);
|
||||
|
||||
/**
|
||||
Clear the current error, if any.
|
||||
We do not clear is_fatal_error or is_fatal_sub_stmt_error since we
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
|
||||
#include "sql_priv.h"
|
||||
#include "sql_select.h"
|
||||
|
||||
#include "my_json_writer.h"
|
||||
|
||||
Explain_query::Explain_query(THD *thd_arg) :
|
||||
upd_del_plan(NULL), insert_plan(NULL), thd(thd_arg), apc_enabled(false)
|
||||
|
@ -139,8 +139,13 @@ int Explain_query::send_explain(THD *thd)
|
|||
thd->send_explain_fields(result, lex->describe, lex->analyze_stmt))
|
||||
return 1;
|
||||
|
||||
int res;
|
||||
if ((res= print_explain(result, lex->describe, lex->analyze_stmt)))
|
||||
int res= 0;
|
||||
if (thd->lex->explain_json)
|
||||
print_explain_json(result, thd->lex->analyze_stmt);
|
||||
else
|
||||
res= print_explain(result, lex->describe, thd->lex->analyze_stmt);
|
||||
|
||||
if (res)
|
||||
result->abort_result_set();
|
||||
else
|
||||
result->send_eof();
|
||||
|
@ -177,6 +182,40 @@ int Explain_query::print_explain(select_result_sink *output,
|
|||
}
|
||||
|
||||
|
||||
void Explain_query::print_explain_json(select_result_sink *output, bool is_analyze)
|
||||
{
|
||||
Json_writer writer;
|
||||
writer.start_object();
|
||||
|
||||
if (upd_del_plan)
|
||||
{
|
||||
//upd_del_plan->print_explain(this, output, explain_flags, is_analyze);
|
||||
DBUG_ASSERT(0);
|
||||
}
|
||||
else if (insert_plan)
|
||||
{
|
||||
//insert_plan->print_explain(this, output, explain_flags, is_analyze);
|
||||
DBUG_ASSERT(0);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Start printing from node with id=1 */
|
||||
Explain_node *node= get_node(1);
|
||||
if (!node)
|
||||
return; /* No query plan */
|
||||
node->print_explain_json(this, &writer, is_analyze);
|
||||
}
|
||||
|
||||
writer.end_object();
|
||||
|
||||
const CHARSET_INFO *cs= system_charset_info;
|
||||
List<Item> item_list;
|
||||
String *buf= &writer.output;
|
||||
item_list.push_back(new Item_string(buf->ptr(), buf->length(), cs));
|
||||
output->send_data(item_list);
|
||||
}
|
||||
|
||||
|
||||
bool print_explain_for_slow_log(LEX *lex, THD *thd, String *str)
|
||||
{
|
||||
return lex->explain->print_explain_str(thd, str, /*is_analyze*/ true);
|
||||
|
@ -212,12 +251,59 @@ static void push_string(List<Item> *item_list, String *str)
|
|||
item_list->push_back(new Item_string_sys(str->ptr(), str->length()));
|
||||
}
|
||||
|
||||
static void push_string_list(List<Item> *item_list, String_list &lines,
|
||||
String *buf)
|
||||
{
|
||||
List_iterator_fast<char> it(lines);
|
||||
char *line;
|
||||
bool first= true;
|
||||
while ((line= it++))
|
||||
{
|
||||
if (first)
|
||||
first= false;
|
||||
else
|
||||
buf->append(',');
|
||||
|
||||
buf->append(line);
|
||||
}
|
||||
push_string(item_list, buf);
|
||||
}
|
||||
|
||||
|
||||
uint Explain_union::make_union_table_name(char *buf)
|
||||
{
|
||||
uint childno= 0;
|
||||
uint len= 6, lastop= 0;
|
||||
memcpy(buf, STRING_WITH_LEN("<union"));
|
||||
|
||||
for (; childno < union_members.elements() && len + lastop + 5 < NAME_LEN;
|
||||
childno++)
|
||||
{
|
||||
len+= lastop;
|
||||
lastop= my_snprintf(buf + len, NAME_LEN - len,
|
||||
"%u,", union_members.at(childno));
|
||||
}
|
||||
|
||||
if (childno < union_members.elements() || len + lastop >= NAME_LEN)
|
||||
{
|
||||
memcpy(buf + len, STRING_WITH_LEN("...>") + 1);
|
||||
len+= 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
len+= lastop;
|
||||
buf[len - 1]= '>'; // change ',' to '>'
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
|
||||
int Explain_union::print_explain(Explain_query *query,
|
||||
select_result_sink *output,
|
||||
uint8 explain_flags,
|
||||
bool is_analyze)
|
||||
{
|
||||
const CHARSET_INFO *cs= system_charset_info;
|
||||
char table_name_buffer[SAFE_NAME_LEN];
|
||||
|
||||
/* print all UNION children, in order */
|
||||
|
@ -241,31 +327,8 @@ int Explain_union::print_explain(Explain_query *query,
|
|||
push_str(&item_list, fake_select_type);
|
||||
|
||||
/* `table` column: something like "<union1,2>" */
|
||||
{
|
||||
uint childno= 0;
|
||||
uint len= 6, lastop= 0;
|
||||
memcpy(table_name_buffer, STRING_WITH_LEN("<union"));
|
||||
|
||||
for (; childno < union_members.elements() && len + lastop + 5 < NAME_LEN;
|
||||
childno++)
|
||||
{
|
||||
len+= lastop;
|
||||
lastop= my_snprintf(table_name_buffer + len, NAME_LEN - len,
|
||||
"%u,", union_members.at(childno));
|
||||
}
|
||||
|
||||
if (childno < union_members.elements() || len + lastop >= NAME_LEN)
|
||||
{
|
||||
memcpy(table_name_buffer + len, STRING_WITH_LEN("...>") + 1);
|
||||
len+= 4;
|
||||
}
|
||||
else
|
||||
{
|
||||
len+= lastop;
|
||||
table_name_buffer[len - 1]= '>'; // change ',' to '>'
|
||||
}
|
||||
item_list.push_back(new Item_string_sys(table_name_buffer, len));
|
||||
}
|
||||
uint len= make_union_table_name(table_name_buffer);
|
||||
item_list.push_back(new Item_string_sys(table_name_buffer, len));
|
||||
|
||||
/* `partitions` column */
|
||||
if (explain_flags & DESCRIBE_PARTITIONS)
|
||||
|
@ -325,6 +388,36 @@ int Explain_union::print_explain(Explain_query *query,
|
|||
}
|
||||
|
||||
|
||||
void Explain_union::print_explain_json(Explain_query *query,
|
||||
Json_writer *writer, bool is_analyze)
|
||||
{
|
||||
char table_name_buffer[SAFE_NAME_LEN];
|
||||
|
||||
writer->add_member("query_block").start_object();
|
||||
writer->add_member("union_result").start_object();
|
||||
// using_temporary_table
|
||||
make_union_table_name(table_name_buffer);
|
||||
writer->add_member("table_name").add_str(table_name_buffer);
|
||||
writer->add_member("access_type").add_str("ALL"); // not very useful
|
||||
writer->add_member("query_specifications").start_array();
|
||||
|
||||
for (int i= 0; i < (int) union_members.elements(); i++)
|
||||
{
|
||||
writer->start_object();
|
||||
writer->add_member("dependent").add_str("TODO");
|
||||
writer->add_member("cacheable").add_str("TODO");
|
||||
Explain_select *sel= query->get_select(union_members.at(i));
|
||||
sel->print_explain_json(query, writer, is_analyze);
|
||||
writer->end_object();
|
||||
}
|
||||
writer->end_array();
|
||||
|
||||
//TODO: print_explain_for_children
|
||||
|
||||
writer->end_object();
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Print EXPLAINs for all children nodes (i.e. for subqueries)
|
||||
*/
|
||||
|
@ -418,21 +511,154 @@ int Explain_select::print_explain(Explain_query *query,
|
|||
}
|
||||
|
||||
|
||||
void Explain_select::print_explain_json(Explain_query *query,
|
||||
Json_writer *writer, bool is_analyze)
|
||||
{
|
||||
writer->add_member("query_block").start_object();
|
||||
writer->add_member("select_id").add_ll(1);
|
||||
if (message)
|
||||
{
|
||||
writer->add_member("table").start_object();
|
||||
writer->add_member("message").add_str(message);
|
||||
writer->end_object();
|
||||
}
|
||||
else
|
||||
{
|
||||
for (uint i=0; i< n_join_tabs; i++)
|
||||
{
|
||||
// psergey-todo: Need to honor SJM nests...
|
||||
join_tabs[i]->print_explain_json(writer, is_analyze);
|
||||
}
|
||||
}
|
||||
writer->end_object();
|
||||
}
|
||||
|
||||
|
||||
void Explain_table_access::push_extra(enum explain_extra_tag extra_tag)
|
||||
{
|
||||
extra_tags.append(extra_tag);
|
||||
}
|
||||
|
||||
|
||||
void Explain_table_access::fill_key_str(String *key_str, bool is_json)
|
||||
{
|
||||
const CHARSET_INFO *cs= system_charset_info;
|
||||
bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT ||
|
||||
type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE);
|
||||
const char *hash_key_prefix= "#hash#";
|
||||
|
||||
if (key.get_key_name())
|
||||
{
|
||||
if (is_hj)
|
||||
key_str->append(hash_key_prefix, strlen(hash_key_prefix), cs);
|
||||
|
||||
key_str->append(key.get_key_name());
|
||||
|
||||
if (is_hj && type != JT_HASH)
|
||||
key_str->append(':');
|
||||
}
|
||||
|
||||
if (quick_info)
|
||||
{
|
||||
StringBuffer<64> buf2;
|
||||
if (is_json)
|
||||
quick_info->print_extra_recursive(&buf2);
|
||||
else
|
||||
quick_info->print_key(&buf2);
|
||||
key_str->append(buf2);
|
||||
}
|
||||
if (type == JT_HASH_NEXT)
|
||||
key_str->append(hash_next_key.get_key_name());
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Fill "key_length".
|
||||
- this is just used key length for ref/range
|
||||
- for index_merge, it is a comma-separated list of lengths.
|
||||
- for hash join, it is key_len:pseudo_key_len
|
||||
|
||||
The column looks identical in tabular and JSON forms. In JSON we consider
the column legacy; it is superseded by used_key_parts.
|
||||
*/
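/*
  Examples from the test results included in this patch (an INT key part is
  4 bytes plus a NULL flag byte, hence 5):

    ref/range over one key part (a1):     "key_length": "5"
    ref over two key parts (b1,b2):       "key_length": "10"
    index_merge over two range scans:     "key_length": "5,5" or "5,10"
*/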
|
||||
|
||||
void Explain_table_access::fill_key_len_str(String *key_len_str)
|
||||
{
|
||||
bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT ||
|
||||
type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE);
|
||||
if (key.get_key_len() != (uint)-1)
|
||||
{
|
||||
char buf[64];
|
||||
size_t length;
|
||||
length= longlong10_to_str(key.get_key_len(), buf, 10) - buf;
|
||||
key_len_str->append(buf, length);
|
||||
if (is_hj && type != JT_HASH)
|
||||
key_len_str->append(':');
|
||||
}
|
||||
|
||||
if (quick_info)
|
||||
{
|
||||
StringBuffer<64> buf2;
|
||||
quick_info->print_key_len(&buf2);
|
||||
key_len_str->append(buf2);
|
||||
}
|
||||
|
||||
if (type == JT_HASH_NEXT)
|
||||
{
|
||||
char buf[64];
|
||||
size_t length;
|
||||
length= longlong10_to_str(hash_next_key.get_key_len(), buf, 10) - buf;
|
||||
key_len_str->append(buf, length);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Explain_index_use::set(MEM_ROOT *mem_root, KEY *key, uint key_len_arg)
|
||||
{
|
||||
set_pseudo_key(mem_root, key->name);
|
||||
key_len= key_len_arg;
|
||||
uint len= 0;
|
||||
for (uint i= 0; i < key->usable_key_parts; i++)
|
||||
{
|
||||
key_parts_list.append_str(mem_root, key->key_part[i].field->field_name);
|
||||
len += key->key_part[i].store_length;
|
||||
if (len >= key_len_arg)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Explain_index_use::set_pseudo_key(MEM_ROOT *root, const char* key_name_arg)
|
||||
{
|
||||
if (key_name_arg)
|
||||
{
|
||||
size_t name_len= strlen(key_name_arg);
|
||||
if ((key_name= (char*)alloc_root(root, name_len+1)))
|
||||
memcpy(key_name, key_name_arg, name_len+1);
|
||||
}
|
||||
else
|
||||
key_name= NULL;
|
||||
key_len= -1;
|
||||
}
|
||||
|
||||
|
||||
double Explain_table_access::get_r_filtered()
|
||||
{
|
||||
//psergey-todo: modify this to produce separate filtered% for both parts of
|
||||
//WHERE.
|
||||
double r_filtered= tracker.get_filtered_after_where();
|
||||
if (bka_type.is_using_jbuf())
|
||||
r_filtered *= jbuf_tracker.get_filtered_after_where();
|
||||
return r_filtered;
|
||||
}
|
||||
|
||||
|
||||
int Explain_table_access::print_explain(select_result_sink *output, uint8 explain_flags,
|
||||
bool is_analyze,
|
||||
uint select_id, const char *select_type,
|
||||
bool using_temporary, bool using_filesort)
|
||||
{
|
||||
const CHARSET_INFO *cs= system_charset_info;
|
||||
const char *hash_key_prefix= "#hash#";
|
||||
bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT ||
|
||||
type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE);
|
||||
|
||||
List<Item> item_list;
|
||||
Item *item_null= new Item_null();
|
||||
|
@ -467,32 +693,15 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
|
|||
push_str(&item_list, join_type_str[type]);
|
||||
|
||||
/* `possible_keys` column */
|
||||
if (possible_keys_str.length() > 0)
|
||||
push_string(&item_list, &possible_keys_str);
|
||||
else
|
||||
StringBuffer<64> possible_keys_buf;
|
||||
if (possible_keys.is_empty())
|
||||
item_list.push_back(item_null);
|
||||
else
|
||||
push_string_list(&item_list, possible_keys, &possible_keys_buf);
|
||||
|
||||
/* `key` */
|
||||
StringBuffer<64> key_str;
|
||||
if (key.get_key_name())
|
||||
{
|
||||
if (is_hj)
|
||||
key_str.append(hash_key_prefix, strlen(hash_key_prefix), cs);
|
||||
|
||||
key_str.append(key.get_key_name());
|
||||
|
||||
if (is_hj && type != JT_HASH)
|
||||
key_str.append(':');
|
||||
}
|
||||
|
||||
if (quick_info)
|
||||
{
|
||||
StringBuffer<64> buf2;
|
||||
quick_info->print_key(&buf2);
|
||||
key_str.append(buf2);
|
||||
}
|
||||
if (type == JT_HASH_NEXT)
|
||||
key_str.append(hash_next_key.get_key_name());
|
||||
fill_key_str(&key_str, false);
|
||||
|
||||
if (key_str.length() > 0)
|
||||
push_string(&item_list, &key_str);
|
||||
|
@ -501,31 +710,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
|
|||
|
||||
/* `key_len` */
|
||||
StringBuffer<64> key_len_str;
|
||||
|
||||
if (key.get_key_len() != (uint)-1)
|
||||
{
|
||||
char buf[64];
|
||||
size_t length;
|
||||
length= longlong10_to_str(key.get_key_len(), buf, 10) - buf;
|
||||
key_len_str.append(buf, length);
|
||||
if (is_hj && type != JT_HASH)
|
||||
key_len_str.append(':');
|
||||
}
|
||||
|
||||
if (quick_info)
|
||||
{
|
||||
StringBuffer<64> buf2;
|
||||
quick_info->print_key_len(&buf2);
|
||||
key_len_str.append(buf2);
|
||||
}
|
||||
|
||||
if (type == JT_HASH_NEXT)
|
||||
{
|
||||
char buf[64];
|
||||
size_t length;
|
||||
length= longlong10_to_str(hash_next_key.get_key_len(), buf, 10) - buf;
|
||||
key_len_str.append(buf, length);
|
||||
}
|
||||
fill_key_len_str(&key_len_str);
|
||||
|
||||
if (key_len_str.length() > 0)
|
||||
push_string(&item_list, &key_len_str);
|
||||
|
@ -533,10 +718,19 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
|
|||
item_list.push_back(item_null);
|
||||
|
||||
/* `ref` */
|
||||
if (ref_set)
|
||||
push_string(&item_list, &ref);
|
||||
StringBuffer<64> ref_list_buf;
|
||||
if (ref_list.is_empty())
|
||||
{
|
||||
if (type == JT_FT)
|
||||
{
|
||||
/* Traditionally, EXPLAIN lines with type=fulltext have ref='' */
|
||||
push_str(&item_list, "");
|
||||
}
|
||||
else
|
||||
item_list.push_back(item_null);
|
||||
}
|
||||
else
|
||||
item_list.push_back(item_null);
|
||||
push_string_list(&item_list, ref_list, &ref_list_buf);
|
||||
|
||||
/* `rows` */
|
||||
if (rows_set)
|
||||
|
@ -628,6 +822,180 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
|
|||
}
|
||||
|
||||
|
||||
bool String_list::append_str(MEM_ROOT *mem_root, const char *str)
|
||||
{
|
||||
size_t len= strlen(str);
|
||||
char *cp;
|
||||
if (!(cp = (char*)alloc_root(mem_root, len+1)))
|
||||
return 1;
|
||||
memcpy(cp, str, len+1);
|
||||
push_back(cp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void write_item(Json_writer *writer, Item *item)
|
||||
{
|
||||
THD *thd= current_thd;
|
||||
char item_buf[256];
|
||||
String str(item_buf, sizeof(item_buf), &my_charset_bin);
|
||||
str.length(0);
|
||||
|
||||
ulonglong save_option_bits= thd->variables.option_bits;
|
||||
thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
|
||||
|
||||
item->print(&str, QT_EXPLAIN);
|
||||
|
||||
thd->variables.option_bits= save_option_bits;
|
||||
writer->add_str(str.c_ptr_safe());
|
||||
}
|
||||
|
||||
|
||||
void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_tag tag)
|
||||
{
|
||||
switch (tag)
|
||||
{
|
||||
case ET_OPEN_FULL_TABLE:
|
||||
writer->add_member("open_full_table").add_bool(true);
|
||||
break;
|
||||
case ET_SCANNED_0_DATABASES:
|
||||
writer->add_member("scanned_databases").add_ll(0);
|
||||
break;
|
||||
case ET_SCANNED_1_DATABASE:
|
||||
writer->add_member("scanned_databases").add_ll(1);
|
||||
break;
|
||||
case ET_SCANNED_ALL_DATABASES:
|
||||
writer->add_member("scanned_databases").add_str("all");
|
||||
break;
|
||||
case ET_SKIP_OPEN_TABLE:
|
||||
writer->add_member("skip_open_table").add_bool(true);
|
||||
break;
|
||||
case ET_OPEN_FRM_ONLY:
|
||||
writer->add_member("open_frm_only").add_bool(true);
|
||||
break;
|
||||
case ET_USING_INDEX_CONDITION:
|
||||
writer->add_member("index_condition");
|
||||
write_item(writer, pushed_index_cond);
|
||||
break;
|
||||
case ET_USING_WHERE:
|
||||
writer->add_member("attached_condition");
|
||||
write_item(writer, where_cond);
|
||||
break;
|
||||
case ET_USING_INDEX:
|
||||
writer->add_member("using_index").add_bool(true);
|
||||
break;
|
||||
case ET_USING:
|
||||
// index merge: case ET_USING
|
||||
break;
|
||||
case ET_USING_JOIN_BUFFER:
|
||||
// TODO TODO
|
||||
break;
|
||||
default:
|
||||
DBUG_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Explain_table_access::print_explain_json(Json_writer *writer,
|
||||
bool is_analyze)
|
||||
{
|
||||
writer->add_member("table").start_object();
|
||||
|
||||
writer->add_member("table_name").add_str(table_name);
|
||||
// partitions
|
||||
writer->add_member("access_type").add_str(join_type_str[type]);
|
||||
if (!possible_keys.is_empty())
|
||||
{
|
||||
List_iterator_fast<char> it(possible_keys);
|
||||
const char *name;
|
||||
writer->add_member("possible_keys").start_array();
|
||||
while ((name= it++))
|
||||
writer->add_str(name);
|
||||
writer->end_array();
|
||||
}
|
||||
|
||||
/* `key` */
|
||||
/* For non-basic quick select, 'key' will not be present */
|
||||
if (!quick_info || quick_info->is_basic())
|
||||
{
|
||||
StringBuffer<64> key_str;
|
||||
fill_key_str(&key_str, true);
|
||||
if (key_str.length())
|
||||
writer->add_member("key").add_str(key_str);
|
||||
}
|
||||
|
||||
/* `key_length` */
|
||||
StringBuffer<64> key_len_str;
|
||||
fill_key_len_str(&key_len_str);
|
||||
if (key_len_str.length())
|
||||
writer->add_member("key_length").add_str(key_len_str);
|
||||
|
||||
/* `used_key_parts` */
|
||||
String_list *parts_list= NULL;
|
||||
if (quick_info && quick_info->is_basic())
|
||||
parts_list= &quick_info->range.key_parts_list;
|
||||
else
|
||||
parts_list= &key.key_parts_list;
|
||||
|
||||
if (parts_list && !parts_list->is_empty())
|
||||
{
|
||||
List_iterator_fast<char> it(*parts_list);
|
||||
const char *name;
|
||||
writer->add_member("used_key_parts").start_array();
|
||||
while ((name= it++))
|
||||
writer->add_str(name);
|
||||
writer->end_array();
|
||||
}
|
||||
|
||||
if (quick_info && !quick_info->is_basic())
|
||||
{
|
||||
writer->add_member("index_merge").start_object();
|
||||
quick_info->print_json(writer);
|
||||
writer->end_object();
|
||||
}
|
||||
|
||||
|
||||
// TODO: here, if quick select is not basic, print its nested form.
|
||||
|
||||
/* `ref` */
|
||||
if (!ref_list.is_empty())
|
||||
{
|
||||
List_iterator_fast<char> it(ref_list);
|
||||
const char *str;
|
||||
writer->add_member("ref").start_array();
|
||||
while ((str= it++))
|
||||
writer->add_str(str);
|
||||
writer->end_array();
|
||||
}
|
||||
|
||||
/* `rows` */
|
||||
if (rows_set)
|
||||
writer->add_member("rows").add_ll(rows);
|
||||
|
||||
/* `r_rows` */
|
||||
if (is_analyze && tracker.has_scans())
|
||||
{
|
||||
ha_rows avg_rows= tracker.get_avg_rows();
|
||||
writer->add_member("r_rows").add_ll(avg_rows);
|
||||
}
|
||||
|
||||
/* `filtered` */
|
||||
if (filtered_set)
|
||||
writer->add_member("filtered").add_double(filtered);
|
||||
|
||||
/* `r_filtered` */
|
||||
if (is_analyze)
|
||||
writer->add_member("r_filtered").add_double(get_r_filtered());
|
||||
|
||||
for (int i=0; i < (int)extra_tags.elements(); i++)
|
||||
{
|
||||
tag_to_json(writer, extra_tags.at(i));
|
||||
}
|
||||
|
||||
writer->end_object();
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Elements in this array match members of enum Extra_tag, defined in
|
||||
sql_explain.h
|
||||
|
@ -757,6 +1125,35 @@ void Explain_quick_select::print_extra(String *str)
|
|||
print_extra_recursive(str);
|
||||
}
|
||||
|
||||
void Explain_quick_select::print_json(Json_writer *writer)
|
||||
{
|
||||
if (is_basic())
|
||||
{
|
||||
writer->add_member("range").start_object();
|
||||
|
||||
writer->add_member("key").add_str(range.get_key_name());
|
||||
|
||||
List_iterator_fast<char> it(range.key_parts_list);
|
||||
const char *name;
|
||||
writer->add_member("used_key_parts").start_array();
|
||||
while ((name= it++))
|
||||
writer->add_str(name);
|
||||
writer->end_array();
|
||||
|
||||
writer->end_object();
|
||||
}
|
||||
else
|
||||
{
|
||||
writer->add_member(get_name_by_type()).start_object();
|
||||
|
||||
List_iterator_fast<Explain_quick_select> it (children);
|
||||
Explain_quick_select* child;
|
||||
while ((child = it++))
|
||||
child->print_json(writer);
|
||||
|
||||
writer->end_object();
|
||||
}
|
||||
}
|
||||
|
||||
void Explain_quick_select::print_extra_recursive(String *str)
|
||||
{
|
||||
|
|
|
@ -14,6 +14,15 @@
|
|||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
|
||||
class String_list: public List<char>
|
||||
{
|
||||
public:
|
||||
bool append_str(MEM_ROOT *mem_root, const char *str);
|
||||
};
|
||||
|
||||
|
||||
|
||||
/* Data structures for ANALYZE */
|
||||
class Table_access_tracker
|
||||
{
|
||||
|
@ -67,6 +76,7 @@ const int FAKE_SELECT_LEX_ID= (int)UINT_MAX;
|
|||
|
||||
class Explain_query;
|
||||
|
||||
class Json_writer;
|
||||
/*
|
||||
A node can be either a SELECT, or a UNION.
|
||||
*/
|
||||
|
@ -97,7 +107,9 @@ public:
|
|||
|
||||
virtual int print_explain(Explain_query *query, select_result_sink *output,
|
||||
uint8 explain_flags, bool is_analyze)=0;
|
||||
|
||||
virtual void print_explain_json(Explain_query *query, Json_writer *writer,
|
||||
bool is_analyze)= 0;
|
||||
|
||||
int print_explain_for_children(Explain_query *query, select_result_sink *output,
|
||||
uint8 explain_flags, bool is_analyze);
|
||||
virtual ~Explain_node(){}
|
||||
|
@ -177,6 +189,8 @@ public:
|
|||
|
||||
int print_explain(Explain_query *query, select_result_sink *output,
|
||||
uint8 explain_flags, bool is_analyze);
|
||||
void print_explain_json(Explain_query *query, Json_writer *writer,
|
||||
bool is_analyze);
|
||||
|
||||
Table_access_tracker *get_using_temporary_read_tracker()
|
||||
{
|
||||
|
@ -222,6 +236,8 @@ public:
|
|||
}
|
||||
int print_explain(Explain_query *query, select_result_sink *output,
|
||||
uint8 explain_flags, bool is_analyze);
|
||||
void print_explain_json(Explain_query *query, Json_writer *writer,
|
||||
bool is_analyze);
|
||||
|
||||
const char *fake_select_type;
|
||||
bool using_filesort;
|
||||
|
@ -236,6 +252,8 @@ public:
|
|||
return &tmptable_read_tracker;
|
||||
}
|
||||
private:
|
||||
uint make_union_table_name(char *buf);
|
||||
|
||||
Table_access_tracker fake_select_lex_tracker;
|
||||
/* This one is for reading after ORDER BY */
|
||||
Table_access_tracker tmptable_read_tracker;
|
||||
|
@ -311,6 +329,8 @@ public:
|
|||
/* Return tabular EXPLAIN output as a text string */
|
||||
bool print_explain_str(THD *thd, String *out_str, bool is_analyze);
|
||||
|
||||
void print_explain_json(select_result_sink *output, bool is_analyze);
|
||||
|
||||
/* If true, at least part of EXPLAIN can be printed */
|
||||
bool have_query_plan() { return insert_plan || upd_del_plan|| get_node(1) != NULL; }
|
||||
|
||||
|
@ -407,21 +427,16 @@ class Explain_index_use : public Sql_alloc
|
|||
{
|
||||
char *key_name;
|
||||
uint key_len;
|
||||
/* will add #keyparts here if we implement EXPLAIN FORMAT=JSON */
|
||||
public:
|
||||
|
||||
void set(MEM_ROOT *root, const char *key_name_arg, uint key_len_arg)
|
||||
String_list key_parts_list;
|
||||
|
||||
void clear()
|
||||
{
|
||||
if (key_name_arg)
|
||||
{
|
||||
size_t name_len= strlen(key_name_arg);
|
||||
if ((key_name= (char*)alloc_root(root, name_len+1)))
|
||||
memcpy(key_name, key_name_arg, name_len+1);
|
||||
}
|
||||
else
|
||||
key_name= NULL;
|
||||
key_len= key_len_arg;
|
||||
key_name= NULL;
|
||||
key_len= (uint)-1;
|
||||
}
|
||||
void set(MEM_ROOT *root, KEY *key_name, uint key_len_arg);
|
||||
void set_pseudo_key(MEM_ROOT *root, const char *key_name);
|
||||
|
||||
inline const char *get_key_name() { return key_name; }
|
||||
inline uint get_key_len() { return key_len; }
|
||||
|
@ -438,6 +453,13 @@ public:
|
|||
{}
|
||||
|
||||
const int quick_type;
|
||||
|
||||
bool is_basic()
|
||||
{
|
||||
return (quick_type == QUICK_SELECT_I::QS_TYPE_RANGE ||
|
||||
quick_type == QUICK_SELECT_I::QS_TYPE_RANGE_DESC ||
|
||||
quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX);
|
||||
}
|
||||
|
||||
/* This is used when quick_type == QUICK_SELECT_I::QS_TYPE_RANGE */
|
||||
Explain_index_use range;
|
||||
|
@ -448,8 +470,11 @@ public:
|
|||
void print_extra(String *str);
|
||||
void print_key(String *str);
|
||||
void print_key_len(String *str);
|
||||
private:
|
||||
|
||||
void print_json(Json_writer *writer);
|
||||
|
||||
void print_extra_recursive(String *str);
|
||||
private:
|
||||
const char *get_name_by_type();
|
||||
};
|
||||
|
||||
|
@ -479,8 +504,8 @@ public:
|
|||
StringBuffer<32> used_partitions;
|
||||
bool used_partitions_set;
|
||||
|
||||
/* Empty string means "NULL" will be printed */
|
||||
StringBuffer<32> possible_keys_str;
|
||||
/* Empty means "NULL" will be printed */
|
||||
String_list possible_keys;
|
||||
|
||||
/*
|
||||
Index use: key name and length.
|
||||
|
@ -498,8 +523,7 @@ public:
|
|||
*/
|
||||
Explain_index_use hash_next_key;
|
||||
|
||||
bool ref_set; /* not set means 'NULL' should be printed */
|
||||
StringBuffer<32> ref;
|
||||
String_list ref_list;
|
||||
|
||||
bool rows_set; /* not set means 'NULL' should be printed */
|
||||
ha_rows rows;
|
||||
|
@ -529,11 +553,19 @@ public:
|
|||
EXPLAIN_BKA_TYPE bka_type;
|
||||
|
||||
StringBuffer<32> firstmatch_table_name;
|
||||
|
||||
/*
|
||||
Note: the lifespan of the WHERE condition is shorter than the lifespan of
this object. The member below is valid if tags include "ET_USING_WHERE".
|
||||
*/
|
||||
Item *where_cond;
|
||||
Item *pushed_index_cond;
|
||||
|
||||
int print_explain(select_result_sink *output, uint8 explain_flags,
|
||||
bool is_analyze,
|
||||
uint select_id, const char *select_type,
|
||||
bool using_temporary, bool using_filesort);
|
||||
void print_explain_json(Json_writer *writer, bool is_analyze);
|
||||
|
||||
/* ANALYZE members*/
|
||||
Table_access_tracker tracker;
|
||||
|
@ -541,6 +573,10 @@ public:
|
|||
|
||||
private:
|
||||
void append_tag_name(String *str, enum explain_extra_tag tag);
|
||||
void fill_key_str(String *key_str, bool is_json);
|
||||
void fill_key_len_str(String *key_len_str);
|
||||
double get_r_filtered();
|
||||
void tag_to_json(Json_writer *writer, enum explain_extra_tag tag);
|
||||
};
|
||||
|
||||
|
||||
|
@ -585,6 +621,8 @@ public:
|
|||
|
||||
virtual int print_explain(Explain_query *query, select_result_sink *output,
|
||||
uint8 explain_flags, bool is_analyze);
|
||||
virtual void print_explain_json(Explain_query *query, Json_writer *writer, bool is_analyze)
|
||||
{ /* EXPLAIN_JSON_NOT_IMPL */}
|
||||
};
|
||||
|
||||
|
||||
|
@ -605,6 +643,9 @@ public:
|
|||
|
||||
int print_explain(Explain_query *query, select_result_sink *output,
|
||||
uint8 explain_flags, bool is_analyze);
|
||||
void print_explain_json(Explain_query *query, Json_writer *writer,
|
||||
bool is_analyze)
|
||||
{ /* EXPLAIN_JSON_NOT_IMPL */}
|
||||
};
|
||||
|
||||
|
||||
|
@ -626,6 +667,8 @@ public:
|
|||
|
||||
virtual int print_explain(Explain_query *query, select_result_sink *output,
|
||||
uint8 explain_flags, bool is_analyze);
|
||||
virtual void print_explain_json(Explain_query *query, Json_writer *writer, bool is_analyze)
|
||||
{ /* EXPLAIN_JSON_NOT_IMPL */}
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -484,6 +484,7 @@ void lex_start(THD *thd)
|
|||
lex->select_lex.group_list_ptrs->clear();
|
||||
lex->describe= 0;
|
||||
lex->analyze_stmt= 0;
|
||||
lex->explain_json= false;
|
||||
lex->subqueries= FALSE;
|
||||
lex->context_analysis_only= 0;
|
||||
lex->derived_tables= 0;
|
||||
|
|
|
@ -2500,6 +2500,7 @@ struct LEX: public Query_tables_list
|
|||
uint table_count;
|
||||
uint8 describe;
|
||||
bool analyze_stmt; /* TRUE<=> this is "ANALYZE $stmt" */
|
||||
bool explain_json;
|
||||
/*
|
||||
A flag that indicates what kinds of derived tables are present in the
|
||||
query (0 if no derived tables, otherwise a combination of flags
|
||||
|
|
|
@ -97,6 +97,8 @@
|
|||
#include "log_slow.h"
|
||||
#include "sql_bootstrap.h"
|
||||
|
||||
#include "my_json_writer.h"
|
||||
|
||||
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
|
||||
|
||||
#ifdef WITH_ARIA_STORAGE_ENGINE
|
||||
|
@ -5730,19 +5732,27 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
|
|||
top-level LIMIT
|
||||
*/
|
||||
result->reset_offset_limit();
|
||||
lex->explain->print_explain(result, lex->describe, lex->analyze_stmt);
|
||||
if (lex->describe & DESCRIBE_EXTENDED)
|
||||
if (lex->explain_json)
|
||||
{
|
||||
char buff[1024];
|
||||
String str(buff,(uint32) sizeof(buff), system_charset_info);
|
||||
str.length(0);
|
||||
/*
|
||||
The warnings system requires input in utf8, @see
|
||||
mysqld_show_warnings().
|
||||
*/
|
||||
lex->unit.print(&str, QT_TO_SYSTEM_CHARSET);
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
|
||||
ER_YES, str.c_ptr_safe());
|
||||
lex->explain->print_explain_json(result, lex->analyze_stmt);
|
||||
}
|
||||
else
|
||||
{
|
||||
lex->explain->print_explain(result, thd->lex->describe,
|
||||
thd->lex->analyze_stmt);
|
||||
if (lex->describe & DESCRIBE_EXTENDED)
|
||||
{
|
||||
char buff[1024];
|
||||
String str(buff,(uint32) sizeof(buff), system_charset_info);
|
||||
str.length(0);
|
||||
/*
|
||||
The warnings system requires input in utf8, @see
|
||||
mysqld_show_warnings().
|
||||
*/
|
||||
lex->unit.print(&str, QT_TO_SYSTEM_CHARSET);
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
|
||||
ER_YES, str.c_ptr_safe());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20791,6 +20791,24 @@ static void free_blobs(Field **ptr)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
@brief
|
||||
Remove duplicates from a temporary table.
|
||||
|
||||
@detail
|
||||
Remove duplicate rows from a temporary table. This is used for e.g. queries
|
||||
like
|
||||
|
||||
select distinct count(*) as CNT from tbl group by col
|
||||
|
||||
Here, we get a group table with count(*) values. It is not possible to
prevent duplicates from appearing in the table (as we don't know the values
before we've done the grouping). Because of that, we have this function to
scan the temp table (possibly multiple times) and remove the duplicate rows.
|
||||
|
||||
Rows that do not satisfy 'having' condition are also removed.
|
||||
*/
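/*
  Conceptual sketch only (the server code below works on the temporary
  table's handler and also drops rows that fail HAVING): the core idea is
  "keep the first copy of each row image, delete the rest".

    #include <string>
    #include <unordered_set>
    #include <vector>

    // rows: serialized row images; keeps first occurrences, in order.
    static void remove_duplicate_rows(std::vector<std::string> &rows)
    {
      std::unordered_set<std::string> seen;
      size_t out= 0;
      for (size_t i= 0; i < rows.size(); i++)
        if (seen.insert(rows[i]).second)  // first time we see this row image
          rows[out++]= rows[i];
      rows.resize(out);
    }
*/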
|
||||
|
||||
static int
|
||||
remove_duplicates(JOIN *join, TABLE *table, List<Item> &fields, Item *having)
|
||||
{
|
||||
|
@ -23051,7 +23069,6 @@ void JOIN::clear()
|
|||
|
||||
/*
|
||||
Print an EXPLAIN line with all NULLs and given message in the 'Extra' column
|
||||
TODO: is_analyze
|
||||
*/
|
||||
|
||||
int print_explain_message_line(select_result_sink *result,
|
||||
|
@ -23333,21 +23350,16 @@ void explain_append_mrr_info(QUICK_RANGE_SELECT *quick, String *res)
|
|||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// TODO: join with make_possible_keys_line ?
|
||||
void append_possible_keys(String *str, TABLE *table, key_map possible_keys)
|
||||
int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table,
|
||||
key_map possible_keys)
|
||||
{
|
||||
uint j;
|
||||
for (j=0 ; j < table->s->keys ; j++)
|
||||
{
|
||||
if (possible_keys.is_set(j))
|
||||
{
|
||||
if (str->length())
|
||||
str->append(',');
|
||||
str->append(table->key_info[j].name,
|
||||
strlen(table->key_info[j].name),
|
||||
system_charset_info);
|
||||
}
|
||||
list.append_str(alloc, table->key_info[j].name);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// TODO: this function is only applicable for the first non-const optimization
|
||||
|
@ -23380,17 +23392,14 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
|
|||
|
||||
TABLE *table=tab->table;
|
||||
TABLE_LIST *table_list= tab->table->pos_in_table_list;
|
||||
char buff4[512];
|
||||
my_bool key_read;
|
||||
char table_name_buffer[SAFE_NAME_LEN];
|
||||
String tmp4(buff4,sizeof(buff4),cs);
|
||||
KEY *key_info= 0;
|
||||
uint key_len= 0;
|
||||
tmp4.length(0);
|
||||
quick_type= -1;
|
||||
QUICK_SELECT_I *quick= NULL;
|
||||
|
||||
eta->key.set(thd->mem_root, NULL, (uint)-1);
|
||||
eta->key.clear();
|
||||
eta->quick_info= NULL;
|
||||
|
||||
tab->tracker= &eta->tracker;
|
||||
|
@ -23488,7 +23497,11 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
|
|||
eta->type= tab_type;
|
||||
|
||||
/* Build "possible_keys" value */
|
||||
append_possible_keys(&eta->possible_keys_str, table, tab->keys);
|
||||
// psergey-todo: why does this use thd MEM_ROOT??? Doesn't this
|
||||
// break ANALYZE ? thd->mem_root will be freed, and after that we will
|
||||
// attempt to print the query plan?
|
||||
append_possible_keys(thd->mem_root, eta->possible_keys, table, tab->keys);
|
||||
// psergey-todo: ^ check for error return code
|
||||
|
||||
/* Build "key", "key_len", and "ref" */
|
||||
if (tab_type == JT_NEXT)
|
||||
|
@ -23513,21 +23526,18 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
|
|||
|
||||
if (key_info) /* 'index' or 'ref' access */
|
||||
{
|
||||
eta->key.set(thd->mem_root, key_info->name, key_len);
|
||||
eta->key.set(thd->mem_root, key_info, key_len);
|
||||
|
||||
if (tab->ref.key_parts && tab_type != JT_FT)
|
||||
{
|
||||
store_key **ref=tab->ref.key_copy;
|
||||
for (uint kp= 0; kp < tab->ref.key_parts; kp++)
|
||||
{
|
||||
if (tmp4.length())
|
||||
tmp4.append(',');
|
||||
|
||||
if ((key_part_map(1) << kp) & tab->ref.const_ref_part_map)
|
||||
tmp4.append("const");
|
||||
eta->ref_list.append_str(thd->mem_root, "const");
|
||||
else
|
||||
{
|
||||
tmp4.append((*ref)->name(), strlen((*ref)->name()), cs);
|
||||
eta->ref_list.append_str(thd->mem_root, (*ref)->name());
|
||||
ref++;
|
||||
}
|
||||
}
|
||||
|
@ -23537,21 +23547,13 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
|
|||
if (tab_type == JT_HASH_NEXT) /* full index scan + hash join */
|
||||
{
|
||||
eta->hash_next_key.set(thd->mem_root,
|
||||
table->key_info[tab->index].name,
|
||||
& table->key_info[tab->index],
|
||||
table->key_info[tab->index].key_length);
|
||||
// psergey-todo: ^ is the above correct? are we necessarily joining on all
|
||||
// columns?
|
||||
}
|
||||
|
||||
if (key_info)
|
||||
{
|
||||
if (key_info && tab_type != JT_NEXT)
|
||||
{
|
||||
eta->ref.copy(tmp4);
|
||||
eta->ref_set= true;
|
||||
}
|
||||
else
|
||||
eta->ref_set= false;
|
||||
}
|
||||
else
|
||||
if (!key_info)
|
||||
{
|
||||
if (table_list && /* SJM bushes don't have table_list */
|
||||
table_list->schema_table &&
|
||||
|
@ -23582,9 +23584,8 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
|
|||
}
|
||||
|
||||
if (key_name_buf.length())
|
||||
eta->key.set(thd->mem_root, key_name_buf.c_ptr_safe(), -1);
|
||||
eta->key.set_pseudo_key(thd->mem_root, key_name_buf.c_ptr_safe());
|
||||
}
|
||||
eta->ref_set= false;
|
||||
}
|
||||
|
||||
/* "rows" */
|
||||
|
@ -23649,7 +23650,10 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
|
|||
|
||||
if (keyno != MAX_KEY && keyno == table->file->pushed_idx_cond_keyno &&
|
||||
table->file->pushed_idx_cond)
|
||||
{
|
||||
eta->push_extra(ET_USING_INDEX_CONDITION);
|
||||
eta->pushed_index_cond= table->file->pushed_idx_cond;
|
||||
}
|
||||
else if (tab->cache_idx_cond)
|
||||
eta->push_extra(ET_USING_INDEX_CONDITION_BKA);
|
||||
|
||||
|
@ -23679,7 +23683,11 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab
|
|||
eta->push_extra(ET_USING_WHERE_WITH_PUSHED_CONDITION);
|
||||
}
|
||||
else
|
||||
{
|
||||
eta->where_cond= tab->select->cond? tab->select->cond:
|
||||
tab->cache_select->cond;
|
||||
eta->push_extra(ET_USING_WHERE);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (table_list /* SJM bushes don't have table_list */ &&
|
||||
|
|
|
@ -982,7 +982,13 @@ public:
|
|||
*/
|
||||
uint top_join_tab_count;
|
||||
uint send_group_parts;
|
||||
bool group; /**< If query contains GROUP BY clause */
|
||||
/*
|
||||
True if the query has GROUP BY.
|
||||
(that is, if group_by != NULL. when DISTINCT is converted into GROUP BY, it
|
||||
will set this, too. It is not clear why we need a separate var from
|
||||
group_list)
|
||||
*/
|
||||
bool group;
|
||||
bool need_distinct;
|
||||
|
||||
/**
|
||||
|
@ -1914,4 +1920,5 @@ ulong check_selectivity(THD *thd,
|
|||
TABLE *table,
|
||||
List<COND_STATISTIC> *conds);
|
||||
|
||||
|
||||
#endif /* SQL_SELECT_INCLUDED */
|
||||
|
|
|
@ -1150,6 +1150,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
|
|||
%token FORCE_SYM
|
||||
%token FOREIGN /* SQL-2003-R */
|
||||
%token FOR_SYM /* SQL-2003-R */
|
||||
%token FORMAT_SYM
|
||||
%token FOUND_SYM /* SQL-2003-R */
|
||||
%token FROM
|
||||
%token FULL /* SQL-2003-R */
|
||||
|
@ -1823,6 +1824,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
|
|||
subselect_end select_var_list select_var_list_init help
|
||||
field_length opt_field_length
|
||||
opt_extended_describe shutdown
|
||||
opt_format_json
|
||||
prepare prepare_src execute deallocate
|
||||
statement sp_suid
|
||||
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
|
||||
|
@ -9737,6 +9739,18 @@ function_call_conflict:
|
|||
if ($$ == NULL)
|
||||
MYSQL_YYABORT;
|
||||
}
|
||||
| FORMAT_SYM '(' expr ',' expr ')'
|
||||
{
|
||||
$$= new (thd->mem_root) Item_func_format($3, $5);
|
||||
if ($$ == NULL)
|
||||
MYSQL_YYABORT;
|
||||
}
|
||||
| FORMAT_SYM '(' expr ',' expr ',' expr ')'
|
||||
{
|
||||
$$= new (thd->mem_root) Item_func_format($3, $5, $7);
|
||||
if ($$ == NULL)
|
||||
MYSQL_YYABORT;
|
||||
}
|
||||
| LAST_VALUE '(' expr_list ')'
|
||||
{
|
||||
$$= new (thd->mem_root) Item_func_last_value(* $3);
|
||||
|
@ -12765,16 +12779,34 @@ describe_command:
|
|||
;
|
||||
|
||||
analyze_stmt_command:
|
||||
ANALYZE_SYM explainable_command
|
||||
ANALYZE_SYM opt_format_json explainable_command
|
||||
{
|
||||
Lex->analyze_stmt= true;
|
||||
}
|
||||
;
|
||||
|
||||
opt_extended_describe:
|
||||
/* empty */ {}
|
||||
| EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
|
||||
EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
|
||||
| PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
|
||||
| opt_format_json {}
|
||||
;
|
||||
|
||||
opt_format_json:
|
||||
/* empty */ {}
|
||||
| FORMAT_SYM EQ ident_or_text
|
||||
{
|
||||
if (!my_strcasecmp(system_charset_info, $3.str, "JSON"))
|
||||
Lex->explain_json= true;
|
||||
else if (!my_strcasecmp(system_charset_info, $3.str, "TRADITIONAL"))
|
||||
{
|
||||
DBUG_ASSERT(Lex->explain_json==false);
|
||||
}
|
||||
else
|
||||
{
|
||||
my_error(ER_UNKNOWN_EXPLAIN_FORMAT, MYF(0), $3.str);
|
||||
MYSQL_YYABORT;
|
||||
}
|
||||
}
|
||||
;
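/*
  Statements accepted through this rule (cf. the included tests):

    EXPLAIN FORMAT=JSON        SELECT * FROM t0;   -- sets Lex->explain_json
    EXPLAIN FORMAT=TRADITIONAL SELECT * FROM t0;   -- keeps tabular output
    ANALYZE FORMAT=JSON        <explainable_command>  -- via analyze_stmt_command

  Any other FORMAT value raises ER_UNKNOWN_EXPLAIN_FORMAT.
*/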
|
||||
|
||||
opt_describe_column:
|
||||
|
@ -14019,6 +14051,7 @@ keyword:
|
|||
| EXAMINED_SYM {}
|
||||
| EXECUTE_SYM {}
|
||||
| FLUSH_SYM {}
|
||||
| FORMAT_SYM {}
|
||||
| GET_SYM {}
|
||||
| HANDLER_SYM {}
|
||||
| HELP_SYM {}
|
||||
|
|
|
@ -1072,6 +1072,12 @@ public:
|
|||
TABLE_LIST *pos_in_table_list;/* Element referring to this table */
|
||||
/* Position in thd->locked_table_list under LOCK TABLES */
|
||||
TABLE_LIST *pos_in_locked_tables;
|
||||
|
||||
/*
|
||||
Not-null for temporary tables only. A non-null value means this table is
used to compute GROUP BY and has a unique constraint over the GROUP BY
columns (set by create_tmp_table).
|
||||
*/
|
||||
ORDER *group;
|
||||
String alias; /* alias or table name */
|
||||
uchar *null_flags;
|
||||
|
|