Mirror of https://github.com/MariaDB/server.git
MDEV-27036: re-enable my_json_writer-t unit test
commit 2d21917e7d
parent 9feaa6be07
17 changed files with 199 additions and 152 deletions
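Most of the diff below follows a single pattern: member functions whose bodies need the full THD definition are no longer defined inline in headers; the header keeps only the declaration and the body moves into the matching .cc file, so the unit test can compile the header against a small mock THD. A minimal sketch of that pattern, using a hypothetical Tracker class rather than anything from the patch:

// tracker.h -- a forward declaration of THD is now enough
class THD;

class Tracker
{
public:
  explicit Tracker(THD *thd);     // declaration only; the body needs THD internals
};

// tracker.cc -- only the implementation file sees the real THD definition
#include "sql_class.h"            // assumption: THD is defined here, as in the server build
#include "tracker.h"

Tracker::Tracker(THD *thd)
{
  // ... use thd members here, e.g. thd->opt_trace ...
}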
@@ -142,6 +142,7 @@ IF (NOT CPACK_GENERATOR)
ENDIF(WIN32)
ENDIF(NOT CPACK_GENERATOR)

INCLUDE(build_depends)
INCLUDE(FeatureSummary)
INCLUDE(misc)
INCLUDE(mysql_version)
@@ -538,8 +539,6 @@ IF(UNIX)
INSTALL_DOCUMENTATION(Docs/INSTALL-BINARY Docs/README-wsrep COMPONENT Readme)
ENDIF()

INCLUDE(build_depends)

INCLUDE(CPack)

IF(WIN32 AND SIGNCODE)
@@ -22,3 +22,4 @@ MACRO(MY_ADD_TESTS)
ENDFOREACH()
ENDMACRO()

FIND_PACKAGE(Boost COMPONENTS unit_test_framework)
@@ -17,6 +17,9 @@
#define MY_DIR_H

#include <sys/stat.h>
#include <stddef.h>

#include "my_global.h"

#ifdef __cplusplus
extern "C" {
@@ -206,7 +206,7 @@ RECOMPILE_FOR_EMBEDDED)
ADD_LIBRARY(sql STATIC ${SQL_SOURCE})
MAYBE_DISABLE_IPO(sql)
DTRACE_INSTRUMENT(sql)
TARGET_LINK_LIBRARIES(sql
TARGET_LINK_LIBRARIES(sql PUBLIC
mysys mysys_ssl dbug strings vio pcre2-8
${FMT_LIBRARIES}
tpool
@@ -221,7 +221,7 @@ ENDIF()
FOREACH(se aria partition perfschema sql_sequence wsrep)
# These engines are used directly in sql sources.
IF(TARGET ${se})
TARGET_LINK_LIBRARIES(sql ${se})
TARGET_LINK_LIBRARIES(sql PUBLIC ${se})
ENDIF()
ENDFOREACH()

@@ -13,10 +13,14 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */

#include "my_json_writer.h"

#include "sql_class.h"
#include "mariadb.h"
#include "sql_priv.h"
#include "sql_string.h"
#include "my_json_writer.h"
#include "log.h"


#if !defined(NDEBUG) || defined(JSON_WRITER_UNIT_TEST)
#include <iostream>
@@ -328,6 +332,7 @@ Json_writer_temp_disable::Json_writer_temp_disable(THD *thd_arg)
thd= thd_arg;
thd->opt_trace.disable_tracing_if_required();
}

Json_writer_temp_disable::~Json_writer_temp_disable()
{
thd->opt_trace.enable_tracing_if_required();
@@ -497,3 +502,18 @@ void Single_line_formatting_helper::disable_and_flush()
state= INACTIVE;
}


Json_writer_struct::Json_writer_struct(THD *thd)
: Json_writer_struct(thd->opt_trace.get_current_json())
{
}

Json_writer_object::Json_writer_object(THD *thd, const char *str)
: Json_writer_object(thd->opt_trace.get_current_json(), str)
{
}

Json_writer_array::Json_writer_array(THD *thd, const char *str)
: Json_writer_array(thd->opt_trace.get_current_json(), str)
{
}
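The three constructors added above do nothing except delegate to the existing Json_writer*-based constructors via thd->opt_trace.get_current_json(); because the delegation now lives in the .cc file, my_json_writer.h no longer needs the full THD class. A self-contained sketch of the same delegating-constructor idiom, with hypothetical stand-in names (Writer, Session, Scope):

class Writer {};                              // stands in for Json_writer

class Session                                 // stands in for THD
{
public:
  Writer *current_writer() { return &w; }     // stands in for opt_trace.get_current_json()
private:
  Writer w;
};

class Scope                                   // stands in for Json_writer_object/_array
{
public:
  explicit Scope(Writer *w) : writer(w) {}    // primary constructor
  explicit Scope(Session *s)
    : Scope(s->current_writer())              // C++11 delegating constructor
  {}
private:
  Writer *writer;
};

int main()
{
  Session s;
  Scope scope(&s);                            // resolves the writer through the session
  return 0;
}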
@@ -17,7 +17,6 @@
#define JSON_WRITER_INCLUDED

#include "my_base.h"
#include "sql_select.h"

#if !defined(NDEBUG) || defined(JSON_WRITER_UNIT_TEST) || defined ENABLED_JSON_WRITER_CONSISTENCY_CHECKS
#include <set>
@@ -32,6 +31,7 @@ constexpr uint FAKE_SELECT_LEX_ID= UINT_MAX;
// Also, mock objects are defined in my_json_writer-t.cc
#define VALIDITY_ASSERT(x) if (!(x)) this->invalid_json= true;
#else
#include "sql_select.h"
#define VALIDITY_ASSERT(x) DBUG_ASSERT(x)
#endif

@@ -40,8 +40,11 @@ constexpr uint FAKE_SELECT_LEX_ID= UINT_MAX;
class Opt_trace_stmt;
class Opt_trace_context;
class Json_writer;
struct TABLE;
struct TABLE_LIST;

struct st_join_table;
using JOIN_TAB= struct st_join_table;

/*
Single_line_formatting_helper is used by Json_writer to do better formatting
@@ -387,10 +390,7 @@ protected:
named_items_expectation.push_back(expect_named_children);
#endif
}
explicit Json_writer_struct(THD *thd)
: Json_writer_struct(thd->opt_trace.get_current_json())
{
}
explicit Json_writer_struct(THD *thd);

public:

@@ -446,10 +446,7 @@ public:
}
}

explicit Json_writer_object(THD* thd, const char *str= nullptr)
: Json_writer_object(thd->opt_trace.get_current_json(), str)
{
}
explicit Json_writer_object(THD* thd, const char *str= nullptr);

~Json_writer_object()
{
@@ -619,10 +616,7 @@ public:
}
}

explicit Json_writer_array(THD *thd, const char *str= nullptr)
: Json_writer_array(thd->opt_trace.get_current_json(), str)
{
}
explicit Json_writer_array(THD *thd, const char *str= nullptr);

~Json_writer_array()
{
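In the header above, VALIDITY_ASSERT(x) expands to a DBUG_ASSERT in normal debug builds, but under JSON_WRITER_UNIT_TEST it only records the failure in the invalid_json flag, which is what lets the unit test further below verify that misuse is detected instead of aborting. A small sketch of the same switch with a hypothetical Checker class (only the macro itself is taken from the header):

#include <cassert>

#ifdef JSON_WRITER_UNIT_TEST
#define VALIDITY_ASSERT(x) if (!(x)) this->invalid_json= true;
#else
#define VALIDITY_ASSERT(x) assert(x)          // the server uses DBUG_ASSERT(x) here
#endif

struct Checker                                // hypothetical stand-in for Json_writer
{
  bool invalid_json= false;

  void check_member_name_unused(bool name_already_used)
  {
    VALIDITY_ASSERT(!name_already_used);      // in test builds this just sets invalid_json
  }
};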
@@ -1254,6 +1254,14 @@ void SQL_SELECT::cleanup()
close_cached_file(&file);
}

int SQL_SELECT::skip_record(THD *thd)
{
int rc= MY_TEST(!cond || cond->val_int());
if (thd->is_error())
rc= -1;
return rc;
}


SQL_SELECT::~SQL_SELECT()
{
@@ -1872,6 +1880,20 @@ QUICK_RANGE::QUICK_RANGE()
min_keypart_map(0), max_keypart_map(0)
{}

QUICK_RANGE::QUICK_RANGE(THD *thd, const uchar *min_key_arg, uint min_length_arg, key_part_map min_keypart_map_arg, const uchar *max_key_arg, uint max_length_arg, key_part_map max_keypart_map_arg, uint flag_arg)
: min_key((uchar*) thd->memdup(min_key_arg, min_length_arg + 1)),
max_key((uchar*) thd->memdup(max_key_arg, max_length_arg + 1)),
min_length((uint16) min_length_arg),
max_length((uint16) max_length_arg),
flag((uint16) flag_arg),
min_keypart_map(min_keypart_map_arg),
max_keypart_map(max_keypart_map_arg)
{
#ifdef HAVE_valgrind
dummy=0;
#endif
}

SEL_ARG::SEL_ARG(SEL_ARG &arg) :Sql_alloc()
{
type=arg.type;
@@ -10848,6 +10870,31 @@ uint SEL_ARG::get_max_key_part() const
return max_part;
}

SEL_ARG *SEL_ARG::clone_and(THD *thd, SEL_ARG *arg)
{ // Get overlapping range
uchar *new_min,*new_max;
uint8 flag_min,flag_max;
if (cmp_min_to_min(arg) >= 0)
{
new_min=min_value; flag_min=min_flag;
}
else
{
new_min=arg->min_value; flag_min=arg->min_flag; /* purecov: deadcode */
}
if (cmp_max_to_max(arg) <= 0)
{
new_max=max_value; flag_max=max_flag;
}
else
{
new_max=arg->max_value; flag_max=arg->max_flag;
}
return new (thd->mem_root) SEL_ARG(field, part, new_min, new_max, flag_min,
flag_max,
MY_TEST(maybe_flag && arg->maybe_flag));
}


/*
Remove the SEL_ARG graph elements which have part > max_part.
@@ -16568,3 +16615,11 @@ void print_keyparts_name(String *out, const KEY_PART_INFO *key_part,
}
out->append(STRING_WITH_LEN(")"));
}

bool RANGE_OPT_PARAM::statement_should_be_aborted() const
{
return thd->killed
|| thd->is_fatal_error
|| thd->is_error()
|| alloced_sel_args > SEL_ARG::MAX_SEL_ARGS;
}
@@ -36,8 +36,9 @@
#include "sql_class.h" // set_var.h: THD
#include "set_var.h" /* Item */

class JOIN;
class Item_sum;
class JOIN;
class Unique;

struct KEY_PART {
uint16 key,part;
@@ -389,30 +390,7 @@ public:
{
return sel_cmp(field,max_value, arg->min_value, max_flag, arg->min_flag);
}
SEL_ARG *clone_and(THD *thd, SEL_ARG* arg)
{ // Get overlapping range
uchar *new_min,*new_max;
uint8 flag_min,flag_max;
if (cmp_min_to_min(arg) >= 0)
{
new_min=min_value; flag_min=min_flag;
}
else
{
new_min=arg->min_value; flag_min=arg->min_flag; /* purecov: deadcode */
}
if (cmp_max_to_max(arg) <= 0)
{
new_max=max_value; flag_max=max_flag;
}
else
{
new_max=arg->max_value; flag_max=arg->max_flag;
}
return new (thd->mem_root) SEL_ARG(field, part, new_min, new_max, flag_min,
flag_max,
MY_TEST(maybe_flag && arg->maybe_flag));
}
SEL_ARG *clone_and(THD *thd, SEL_ARG* arg);
SEL_ARG *clone_first(SEL_ARG *arg)
{ // min <= X < arg->min
return new SEL_ARG(field,part, min_value, arg->min_value,
@@ -733,14 +711,7 @@ public:
bool force_default_mrr;
KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */

bool statement_should_be_aborted() const
{
return
thd->killed ||
thd->is_fatal_error ||
thd->is_error() ||
alloced_sel_args > SEL_ARG::MAX_SEL_ARGS;
}
bool statement_should_be_aborted() const;
};


@@ -763,21 +734,9 @@ class QUICK_RANGE :public Sql_alloc {
QUICK_RANGE(); /* Full range */
QUICK_RANGE(THD *thd, const uchar *min_key_arg, uint min_length_arg,
key_part_map min_keypart_map_arg,
const uchar *max_key_arg, uint max_length_arg,
const uchar *max_key_arg, uint max_length_arg,
key_part_map max_keypart_map_arg,
uint flag_arg)
: min_key((uchar*) thd->memdup(min_key_arg, min_length_arg + 1)),
max_key((uchar*) thd->memdup(max_key_arg, max_length_arg + 1)),
min_length((uint16) min_length_arg),
max_length((uint16) max_length_arg),
flag((uint16) flag_arg),
min_keypart_map(min_keypart_map_arg),
max_keypart_map(max_keypart_map_arg)
{
#ifdef HAVE_valgrind
dummy=0;
#endif
}
uint flag_arg);

/**
Initalizes a key_range object for communication with storage engine.
@@ -1724,13 +1683,7 @@ class SQL_SELECT :public Sql_alloc {
-1 if error
1 otherwise
*/
inline int skip_record(THD *thd)
{
int rc= MY_TEST(!cond || cond->val_int());
if (thd->is_error())
rc= -1;
return rc;
}
int skip_record(THD *thd);
int test_quick_select(THD *thd, key_map keys, table_map prev_tables,
ha_rows limit, bool force_quick_range,
bool ordered_output, bool remove_false_parts_of_where,
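The skip_record() member moved out of line above keeps its documented contract (the "-1 if error / 1 otherwise" fragment belongs to its comment block): it returns 1 when the row satisfies the attached condition, 0 when the row should be skipped, and -1 when evaluating the condition raised an error. A hedged sketch of how a caller has to branch on that value; the loop shape and the read_next_row callback are illustrative only, not from the patch:

// Illustrative caller; SQL_SELECT and THD as in the hunks above.
static bool scan_rows(SQL_SELECT *select, THD *thd, bool (*read_next_row)())
{
  while (read_next_row())
  {
    int rc= select->skip_record(thd);    // 1 = row matches, 0 = filter it out, -1 = error
    if (rc < 0)
      return true;                       // propagate the evaluation error
    if (rc == 0)
      continue;                          // condition rejected this row
    // ... process the accepted row ...
  }
  return false;
}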
@@ -241,6 +241,11 @@ const uchar *sys_var::global_value_ptr(THD *thd, const LEX_CSTRING *base) const
return global_var_ptr();
}

uchar *sys_var::session_var_ptr(THD *thd) const
{
return ((uchar*)&(thd->variables)) + offset;
}

bool sys_var::check(THD *thd, set_var *var)
{
if (unlikely((var->value && do_check(thd, var)) ||
@@ -247,8 +247,7 @@ protected:
Typically it's the same as session_value_ptr(), but it's different,
for example, for ENUM, that is printed as a string, but stored as a number.
*/
uchar *session_var_ptr(THD *thd) const
{ return ((uchar*)&(thd->variables)) + offset; }
uchar *session_var_ptr(THD *thd) const;

uchar *global_var_ptr() const
{ return ((uchar*)&global_system_variables) + offset; }
@@ -54,6 +54,8 @@ it into the slow query log.
#ifndef SQL_EXPLAIN_INCLUDED
#define SQL_EXPLAIN_INCLUDED

class select_result_sink;

class String_list: public List<char>
{
public:
@@ -12569,6 +12569,13 @@ inline void JOIN::eval_select_list_used_tables()
}
}

JOIN_TAB *JOIN::get_sort_by_join_tab()
{
return (need_tmp || !sort_by_table || skip_sort_order
|| ((group || tmp_table_param.sum_func_count) && !group_list))
? nullptr : join_tab+const_tables;
}


/*
Determine {after which table we'll produce ordered set}
@@ -13991,6 +13998,17 @@ bool JOIN_TAB::pfs_batch_update(JOIN *join)
(!select_cond || !select_cond->with_subquery()); // 3
}

int st_join_table::get_non_merged_semijoin_select() const
{
Item_in_subselect *subq;
if (table->pos_in_table_list &&
(subq= table->pos_in_table_list->jtbm_subselect))
{
return subq->unit->first_select()->select_number;
}
return 0; /* Not a merged semi-join */
}


/**
Build a TABLE_REF structure for index lookup in the temporary table
@@ -30240,3 +30258,48 @@ static bool process_direct_rownum_comparison(THD *thd, SELECT_LEX_UNIT *unit,
/**
@} (end of group Query_Optimizer)
*/

store_key::store_key_result store_key_item::copy_inner()
{
TABLE *table= to_field->table;
MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
&table->write_set);
int res= FALSE;

/*
It looks like the next statement is needed only for a simplified
hash function over key values used now in BNLH join.
When the implementation of this function will be replaced for a proper
full version this statement probably should be removed.
*/
to_field->reset();

if (use_value)
item->save_val(to_field);
else
res= item->save_in_field(to_field, 1);
/*
Item::save_in_field() may call Item::val_xxx(). And if this is a subquery
we need to check for errors executing it and react accordingly
*/
if (!res && table->in_use->is_error())
res= 1; /* STORE_KEY_FATAL */
dbug_tmp_restore_column_map(&table->write_set, old_map);
null_key= to_field->is_null() || item->null_value;
return ((err != 0 || res < 0 || res > 2)
? STORE_KEY_FATAL : (store_key_result) res);
}

store_key::store_key_result store_key::copy(THD *thd)
{
enum store_key_result result;
enum_check_fields org_count_cuted_fields= thd->count_cuted_fields;
sql_mode_t org_sql_mode= thd->variables.sql_mode;
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
result= copy_inner();
thd->count_cuted_fields= org_count_cuted_fields;
thd->variables.sql_mode= org_sql_mode;
return result;
}
@@ -30,11 +30,17 @@

#include "procedure.h"
#include "sql_array.h" /* Array */
#include "sql_class.h"
#include "sql_lex.h"
#include "records.h" /* READ_RECORD */
#include "opt_range.h" /* SQL_SELECT, QUICK_SELECT_I */
#include "filesort.h"

typedef struct st_join_table JOIN_TAB;
class POSITION;
#ifndef TMP_ENGINE_COLUMNDEF
class TMP_ENGINE_COLUMNDEF;
#endif

/* Values in optimize */
#define KEY_OPTIMIZE_EXISTS 1U
#define KEY_OPTIMIZE_REF_OR_NULL 2U
@@ -199,6 +205,8 @@ enum join_type { JT_UNKNOWN,JT_SYSTEM,JT_CONST,JT_EQ_REF,JT_REF,JT_MAYBE_REF,
JT_HASH, JT_HASH_RANGE, JT_HASH_NEXT, JT_HASH_INDEX_MERGE};

class JOIN;
struct st_join_table;
using JOIN_TAB= struct st_join_table;

enum enum_nested_loop_state
{
@@ -250,7 +258,7 @@ class Filesort;
struct SplM_plan_info;
class SplM_opt_info;

typedef struct st_join_table {
struct st_join_table {
TABLE *table;
TABLE_LIST *tab_list;
KEYUSE *keyuse; /**< pointer to first used key */
@@ -649,16 +657,7 @@ typedef struct st_join_table {
If this join_tab reads a non-merged semi-join (also called jtbm), return
the select's number. Otherwise, return 0.
*/
int get_non_merged_semijoin_select() const
{
Item_in_subselect *subq;
if (table->pos_in_table_list &&
(subq= table->pos_in_table_list->jtbm_subselect))
{
return subq->unit->first_select()->select_number;
}
return 0; /* Not a merged semi-join */
}
int get_non_merged_semijoin_select() const;

bool access_from_tables_is_allowed(table_map used_tables,
table_map sjm_lookup_tables)
@@ -686,7 +685,7 @@ typedef struct st_join_table {
table_map remaining_tables);
bool fix_splitting(SplM_plan_info *spl_plan, table_map remaining_tables,
bool is_const_table);
} JOIN_TAB;
};


#include "sql_join_cache.h"
@@ -1663,12 +1662,7 @@ public:
Return the table for which an index scan can be used to satisfy
the sort order needed by the ORDER BY/(implicit) GROUP BY clause
*/
JOIN_TAB *get_sort_by_join_tab()
{
return (need_tmp || !sort_by_table || skip_sort_order ||
((group || tmp_table_param.sum_func_count) && !group_list)) ?
NULL : join_tab+const_tables;
}
JOIN_TAB *get_sort_by_join_tab();
bool setup_subquery_caches();
bool shrink_join_buffers(JOIN_TAB *jt,
ulonglong curr_space,
@@ -1839,19 +1833,7 @@ public:
@details this function makes sure truncation warnings when preparing the
key buffers don't end up as errors (because of an enclosing INSERT/UPDATE).
*/
enum store_key_result copy(THD *thd)
{
enum store_key_result result;
enum_check_fields org_count_cuted_fields= thd->count_cuted_fields;
sql_mode_t org_sql_mode= thd->variables.sql_mode;
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
result= copy_inner();
thd->count_cuted_fields= org_count_cuted_fields;
thd->variables.sql_mode= org_sql_mode;
return result;
}
enum store_key_result copy(THD *thd);

protected:
Field *to_field; // Store data here
@@ -1937,36 +1919,7 @@ public:
const char *name() const override { return "func"; }

protected:
enum store_key_result copy_inner() override
{
TABLE *table= to_field->table;
MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
&table->write_set);
int res= FALSE;

/*
It looks like the next statement is needed only for a simplified
hash function over key values used now in BNLH join.
When the implementation of this function will be replaced for a proper
full version this statement probably should be removed.
*/
to_field->reset();

if (use_value)
item->save_val(to_field);
else
res= item->save_in_field(to_field, 1);
/*
Item::save_in_field() may call Item::val_xxx(). And if this is a subquery
we need to check for errors executing it and react accordingly
*/
if (!res && table->in_use->is_error())
res= 1; /* STORE_KEY_FATAL */
dbug_tmp_restore_column_map(&table->write_set, old_map);
null_key= to_field->is_null() || item->null_value;
return ((err != 0 || res < 0 || res > 2) ? STORE_KEY_FATAL :
(store_key_result) res);
}
enum store_key_result copy_inner() override;
};

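The change above from "typedef struct st_join_table { ... } JOIN_TAB;" to a plain "struct st_join_table { ... };" plus "using JOIN_TAB= struct st_join_table;" matters because the alias form can be introduced from a mere forward declaration, which is exactly what my_json_writer.h now does. A short sketch of why, with a hypothetical st_widget type:

// consumer.h -- can name the type and its alias without the full definition
struct st_widget;                  // forward declaration is enough
using WIDGET= struct st_widget;    // the alias no longer has to sit on the definition

WIDGET *find_widget(int id);       // pointers to the incomplete type are fine in interfaces

// widget.h -- the single full definition lives elsewhere
struct st_widget
{
  int id;
};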
@@ -6794,3 +6794,9 @@ static Sys_var_ulonglong Sys_max_rowid_filter_size(
SESSION_VAR(max_rowid_filter_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1024, (ulonglong)~(intptr)0), DEFAULT(128*1024),
BLOCK_SIZE(1));

bool Sys_var_mybool::session_update(THD *thd, set_var *var)
{
session_var(thd, my_bool)= var->save_result.ulonglong_value != 0;
return false;
}
@@ -460,11 +460,7 @@ public:
SYSVAR_ASSERT(getopt.arg_type == OPT_ARG || getopt.id < 0);
SYSVAR_ASSERT(size == sizeof(my_bool));
}
bool session_update(THD *thd, set_var *var)
{
session_var(thd, my_bool)= var->save_result.ulonglong_value != 0;
return false;
}
bool session_update(THD *thd, set_var *var);
bool global_update(THD *thd, set_var *var)
{
global_var(my_bool)= var->save_result.ulonglong_value != 0;
@@ -31,6 +31,7 @@ ADD_DEPENDENCIES(mf_iocache-t GenError)
MY_ADD_TEST(mf_iocache)

# Json writer needs String which needs sql library
#ADD_EXECUTABLE(my_json_writer-t my_json_writer-t.cc dummy_builtins.cc)
#TARGET_LINK_LIBRARIES(my_json_writer-t sql mytap)
#MY_ADD_TEST(my_json_writer)
ADD_EXECUTABLE(my_json_writer-t my_json_writer-t.cc dummy_builtins.cc)
TARGET_LINK_LIBRARIES(my_json_writer-t PUBLIC sql mytap)
TARGET_COMPILE_DEFINITIONS(my_json_writer-t PUBLIC JSON_WRITER_UNIT_TEST)
MY_ADD_TEST(my_json_writer)
@@ -26,7 +26,6 @@
*/

struct TABLE;
struct JOIN_TAB;
class Json_writer;


@@ -39,13 +38,15 @@ public:
Json_writer *get_current_json() { return nullptr; }
};

class THD
class THD
{
public:
Opt_trace opt_trace;
};

#ifndef JSON_WRITER_UNIT_TEST
#define JSON_WRITER_UNIT_TEST
#endif
#include "../sql/my_json_writer.h"
#include "../sql/my_json_writer.cc"

@@ -124,19 +125,15 @@ int main(int args, char **argv)
w.start_object();
w.add_member("name").add_ll(1);
w.add_member("name").add_ll(2);
w.end_object();
ok(w.invalid_json, "JSON object member name collision");
}

{
Json_writer w;
w.start_object();
w.add_member("name").add_ll(1);
w.start_object();
w.add_member("name").start_object();
w.add_member("name").add_ll(2);
w.end_object();
w.end_object();
ok(!w.invalid_json, "Valid JSON: nested object member name is the same");
ok(!w.invalid_json, "This must be valid JSON: nested object member has the same name");
}

diag("Done");
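The re-enabled test drives Json_writer directly and inspects the invalid_json flag through the mytap ok() helper, as the hunks above show. A hedged sketch of one more case in the same style, reusing only calls that already appear in this file (ok(), start_object(), add_member().add_ll(), end_object()); that a later duplicate is flagged just like an adjacent one is assumed from the first test above:

{
  Json_writer w;
  w.start_object();
  w.add_member("cost").add_ll(1);
  w.add_member("rows").add_ll(2);
  w.add_member("cost").add_ll(3);      /* reuses the first member's name */
  w.end_object();
  ok(w.invalid_json, "repeated member name within one object is flagged");
}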