Merge branch '11.4' into 11.7

* rpl.rpl_system_versioning_partitions updated for MDEV-32188
* innodb.row_size_error_log_warnings_3 changed error for MDEV-33658
  (checks are done in a different order)
This commit is contained in:
Sergei Golubchik 2025-02-06 16:46:02 +01:00
commit ba01c2aaf0
470 changed files with 12024 additions and 6918 deletions

View file

@ -9701,8 +9701,7 @@ String *Field_set::val_str(String *val_buffer,
ulonglong tmp=(ulonglong) Field_enum::val_int();
uint bitnr=0;
val_buffer->set_charset(field_charset());
val_buffer->length(0);
val_buffer->copy("", 0, field_charset());
while (tmp && bitnr < (uint) m_typelib->count)
{

View file

@ -653,6 +653,9 @@ public:
bool cleanup_session_expr();
bool fix_and_check_expr(THD *thd, TABLE *table);
inline bool is_equal(const Virtual_column_info* vcol) const;
/* Same as is_equal() but for comparing with different table */
bool is_equivalent(THD *thd, TABLE_SHARE *share, TABLE_SHARE *vcol_share,
const Virtual_column_info* vcol, bool &error) const;
inline void print(String*);
};
@ -5972,7 +5975,7 @@ uint pack_length_to_packflag(uint type);
enum_field_types get_blob_type_from_length(ulong length);
int set_field_to_null(Field *field);
int set_field_to_null_with_conversions(Field *field, bool no_conversions);
int convert_null_to_field_value_or_error(Field *field);
int convert_null_to_field_value_or_error(Field *field, uint err);
bool check_expression(Virtual_column_info *vcol, const Lex_ident_column &name,
enum_vcol_info_type type, Alter_info *alter_info= NULL);

View file

@ -126,7 +126,7 @@ static int set_bad_null_error(Field *field, int err)
return 0;
case CHECK_FIELD_ERROR_FOR_NULL:
if (!field->table->in_use->no_errors)
my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name.str);
my_error(err, MYF(0), field->field_name.str);
return -1;
}
DBUG_ASSERT(0); // impossible
@ -164,7 +164,7 @@ int set_field_to_null(Field *field)
If no_conversion was not set, an error message is printed
*/
int convert_null_to_field_value_or_error(Field *field)
int convert_null_to_field_value_or_error(Field *field, uint err)
{
if (field->type() == MYSQL_TYPE_TIMESTAMP)
{
@ -172,14 +172,16 @@ int convert_null_to_field_value_or_error(Field *field)
return 0;
}
MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table, &field->table->write_set);
field->reset(); // Note: we ignore any potential failure of reset() here.
dbug_tmp_restore_column_map(&field->table->write_set, old_map);
if (field == field->table->next_number_field)
{
field->table->auto_increment_field_not_null= FALSE;
return 0; // field is set in fill_record()
}
return set_bad_null_error(field, ER_BAD_NULL_ERROR);
return set_bad_null_error(field, err);
}
/**
@ -216,7 +218,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions)
if (no_conversions)
return -1;
return convert_null_to_field_value_or_error(field);
return convert_null_to_field_value_or_error(field, ER_BAD_NULL_ERROR);
}

View file

@ -730,13 +730,6 @@ static uchar *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count,
}
#ifndef DBUG_OFF
/* Buffer where record is returned */
char dbug_print_row_buff[512];
/* Temporary buffer for printing a column */
char dbug_print_row_buff_tmp[512];
/*
Print table's current row into a buffer and return a pointer to it.
@ -749,38 +742,53 @@ char dbug_print_row_buff_tmp[512];
Only columns in table->read_set are printed
*/
const char* dbug_print_table_row(TABLE *table)
const char* dbug_print_row(TABLE *table, const uchar *rec, bool print_names)
{
Field **pfield;
String tmp(dbug_print_row_buff_tmp,
sizeof(dbug_print_row_buff_tmp),&my_charset_bin);
const size_t alloc_size= 512;
char *row_buff= (char *) alloc_root(&table->mem_root, alloc_size);
char *row_buff_tmp= (char *) alloc_root(&table->mem_root, alloc_size);
String tmp(row_buff_tmp, alloc_size, &my_charset_bin);
String output(row_buff, alloc_size, &my_charset_bin);
String output(dbug_print_row_buff, sizeof(dbug_print_row_buff),
&my_charset_bin);
auto move_back_lambda= [table, rec]() mutable {
table->move_fields(table->field, table->record[0], rec);
};
auto move_back_guard= make_scope_exit(move_back_lambda, false);
if (rec != table->record[0])
{
table->move_fields(table->field, rec, table->record[0]);
move_back_guard.engage();
}
SCOPE_VALUE(table->read_set, (table->read_set && table->write_set) ?
table->write_set : table->read_set);
output.length(0);
output.append(table->alias);
output.append('(');
bool first= true;
for (pfield= table->field; *pfield ; pfield++)
if (print_names)
{
const LEX_CSTRING *name;
if (table->read_set && !bitmap_is_set(table->read_set, (*pfield)->field_index))
continue;
if (first)
first= false;
else
output.append(',');
for (pfield= table->field; *pfield ; pfield++)
{
if (table->read_set && !bitmap_is_set(table->read_set, (*pfield)->field_index))
continue;
name= (*pfield)->field_name.str ? &(*pfield)->field_name: &NULL_clex_str;
output.append(name);
if (first)
first= false;
else
output.append(STRING_WITH_LEN(", "));
output.append((*pfield)->field_name.str
? (*pfield)->field_name : NULL_clex_str);
}
output.append(STRING_WITH_LEN(")=("));
first= true;
}
output.append(STRING_WITH_LEN(")=("));
first= true;
for (pfield= table->field; *pfield ; pfield++)
{
Field *field= *pfield;
@ -791,7 +799,7 @@ const char* dbug_print_table_row(TABLE *table)
if (first)
first= false;
else
output.append(',');
output.append(STRING_WITH_LEN(", "));
if (field->is_null())
output.append(&NULL_clex_str);
@ -805,17 +813,14 @@ const char* dbug_print_table_row(TABLE *table)
}
}
output.append(')');
return output.c_ptr_safe();
}
const char* dbug_print_row(TABLE *table, uchar *rec)
const char* dbug_print_table_row(TABLE *table)
{
table->move_fields(table->field, rec, table->record[0]);
const char* ret= dbug_print_table_row(table);
table->move_fields(table->field, table->record[0], rec);
return ret;
return dbug_print_row(table, table->record[0]);
}

View file

@ -3226,6 +3226,24 @@ err1:
DBUG_RETURN(2);
}
Compare_keys ha_partition::compare_key_parts(
const Field &old_field,
const Column_definition &new_field,
const KEY_PART_INFO &old_part,
const KEY_PART_INFO &new_part) const
{
Compare_keys res= m_file[0]->compare_key_parts(old_field, new_field,
old_part, new_part);
/*
Partitions have the same storage engine (until MDEV-22168) so the
calls should all return the same value for now.
*/
for (uint i= 1; i < m_tot_parts; i++)
if (res != m_file[i]->compare_key_parts(old_field, new_field,
old_part, new_part))
return Compare_keys::NotEqual;
return res;
}
/**
Setup m_engine_array

View file

@ -484,6 +484,11 @@ public:
m_part_info= part_info;
m_is_sub_partitioned= part_info->is_sub_partitioned();
}
Compare_keys compare_key_parts(
const Field &old_field,
const Column_definition &new_field,
const KEY_PART_INFO &old_part,
const KEY_PART_INFO &new_part) const override;
void return_record_by_parent() override;

View file

@ -1800,7 +1800,7 @@ int ha_commit_trans(THD *thd, bool all)
thd->m_transaction_psi= NULL;
}
#ifdef WITH_WSREP
if (wsrep_is_active(thd) && is_real_trans && !error)
if (WSREP(thd) && wsrep_is_active(thd) && is_real_trans && !error)
wsrep_commit_empty(thd, all);
#endif /* WITH_WSREP */
@ -2347,9 +2347,14 @@ int ha_rollback_trans(THD *thd, bool all)
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
#ifdef WITH_WSREP
WSREP_WARN("handlerton rollback failed, thd %lld %lld conf %d SQL %s",
thd->thread_id, thd->query_id, thd->wsrep_trx().state(),
thd->query());
if (WSREP(thd))
{
WSREP_WARN("handlerton rollback failed, thd %lld %lld "
"conf %d wsrep_err %s SQL %s",
thd->thread_id, thd->query_id, thd->wsrep_trx().state(),
wsrep::to_c_string(thd->wsrep_cs().current_error()),
thd->query());
}
#endif /* WITH_WSREP */
}
status_var_increment(thd->status_var.ha_rollback_count);
@ -2361,11 +2366,12 @@ int ha_rollback_trans(THD *thd, bool all)
}
#ifdef WITH_WSREP
if (thd->is_error())
if (WSREP(thd) && thd->is_error())
{
WSREP_DEBUG("ha_rollback_trans(%lld, %s) rolled back: %s: %s; is_real %d",
thd->thread_id, all?"TRUE":"FALSE", wsrep_thd_query(thd),
thd->get_stmt_da()->message(), is_real_trans);
WSREP_DEBUG("ha_rollback_trans(%lld, %s) rolled back: msg %s is_real %d wsrep_err %s",
thd->thread_id, all? "TRUE" : "FALSE",
thd->get_stmt_da()->message(), is_real_trans,
wsrep::to_c_string(thd->wsrep_cs().current_error()));
}
// REPLACE|INSERT INTO ... SELECT uses TOI in consistency check
@ -6283,7 +6289,10 @@ int handler::calculate_checksum()
for (;;)
{
if (thd->killed)
return HA_ERR_ABORTED_BY_USER;
{
error= HA_ERR_ABORTED_BY_USER;
break;
}
ha_checksum row_crc= 0;
error= ha_rnd_next(table->record[0]);
@ -8187,6 +8196,7 @@ int handler::ha_write_row(const uchar *buf)
TABLE_IO_WAIT(tracker, PSI_TABLE_WRITE_ROW, MAX_KEY, error,
{ error= write_row(buf); })
DBUG_PRINT("dml", ("INSERT: %s = %d", dbug_print_row(table, buf, false), error));
MYSQL_INSERT_ROW_DONE(error);
if (!error && !((error= table->hlindexes_on_insert())))
@ -8200,6 +8210,7 @@ int handler::ha_write_row(const uchar *buf)
ht->flags & HTON_WSREP_REPLICATION &&
!error && (error= wsrep_after_row(ha_thd())))
{
DEBUG_SYNC_C("ha_write_row_end");
DBUG_RETURN(error);
}
#endif /* WITH_WSREP */
@ -8238,6 +8249,8 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
TABLE_IO_WAIT(tracker, PSI_TABLE_UPDATE_ROW, active_index, 0,
{ error= update_row(old_data, new_data);})
DBUG_PRINT("dml", ("UPDATE: %s => %s = %d", dbug_print_row(table, old_data, false),
dbug_print_row(table, new_data, false), error));
MYSQL_UPDATE_ROW_DONE(error);
if (likely(!error) && !(error= table->hlindexes_on_update()))
@ -8315,6 +8328,7 @@ int handler::ha_delete_row(const uchar *buf)
TABLE_IO_WAIT(tracker, PSI_TABLE_DELETE_ROW, active_index, error,
{ error= delete_row(buf);})
DBUG_PRINT("dml", ("DELETE: %s = %d", dbug_print_row(table, buf, false), error));
MYSQL_DELETE_ROW_DONE(error);
if (likely(!error) && !(error= table->hlindexes_on_delete(buf)))
{

View file

@ -5835,4 +5835,8 @@ inline void Cost_estimate::reset(handler *file)
int get_select_field_pos(Alter_info *alter_info, int select_field_count,
bool versioned);
#ifndef DBUG_OFF
const char* dbug_print_row(TABLE *table, const uchar *rec, bool print_names= true);
#endif /* DBUG_OFF */
#endif /* HANDLER_INCLUDED */

View file

@ -839,6 +839,30 @@ bool Item_field::rename_fields_processor(void *arg)
return 0;
}
/**
Rename table and clean field for EXCHANGE comparison
*/
bool Item_field::rename_table_processor(void *arg)
{
Item::func_processor_rename_table *p= (Item::func_processor_rename_table*) arg;
/* If (db_name, table_name) matches (p->old_db, p->old_table)
rename to (p->new_db, p->new_table) */
if (((!db_name.str && !p->old_db.str) ||
db_name.streq(p->old_db)) &&
((!table_name.str && !p->old_table.str) ||
table_name.streq(p->old_table)))
{
db_name= p->new_db;
table_name= p->new_table;
}
/* Item_field equality is done by field pointer if it is set, we need to avoid that */
field= NULL;
return 0;
}
/**
Check if an Item_field references some field from a list of fields.
@ -4008,7 +4032,7 @@ void Item_string::print(String *str, enum_query_type query_type)
}
else
{
str_value.print(str, system_charset_info);
str_value.print(str, &my_charset_utf8mb4_general_ci);
}
}
else
@ -4275,6 +4299,7 @@ void Item_param::set_null(const DTCollation &c)
decimals= 0;
collation= c;
state= NULL_VALUE;
value.set_handler(&type_handler_null);
DBUG_VOID_RETURN;
}
@ -5104,7 +5129,10 @@ void Item_param::set_default(bool set_type_handler_null)
*/
null_value= true;
if (set_type_handler_null)
{
value.set_handler(&type_handler_null);
set_handler(&type_handler_null);
}
}
void Item_param::set_ignore(bool set_type_handler_null)
@ -5113,7 +5141,10 @@ void Item_param::set_ignore(bool set_type_handler_null)
state= IGNORE_VALUE;
null_value= true;
if (set_type_handler_null)
{
value.set_handler(&type_handler_null);
set_handler(&type_handler_null);
}
}
/**

View file

@ -2370,6 +2370,7 @@ public:
virtual bool check_partition_func_processor(void *arg) { return true; }
virtual bool post_fix_fields_part_expr_processor(void *arg) { return 0; }
virtual bool rename_fields_processor(void *arg) { return 0; }
virtual bool rename_table_processor(void *arg) { return 0; }
/*
TRUE if the function is knowingly TRUE or FALSE.
Not to be used for AND/OR formulas.
@ -2398,6 +2399,13 @@ public:
LEX_CSTRING table_name;
List<Create_field> fields;
};
struct func_processor_rename_table
{
Lex_ident_db old_db;
Lex_ident_table old_table;
Lex_ident_db new_db;
Lex_ident_table new_table;
};
virtual bool check_vcol_func_processor(void *arg)
{
return mark_unsupported_function(full_name(), arg, VCOL_IMPOSSIBLE);
@ -3269,8 +3277,9 @@ public:
bool append_for_log(THD *thd, String *str) override;
Item *do_get_copy(THD *) const override { return nullptr; }
Item *do_build_clone(THD *thd) const override { return nullptr; }
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_splocal>(thd, this); }
Item *do_build_clone(THD *thd) const override { return get_copy(thd); }
/*
Override the inherited create_field_for_create_select(),
@ -3865,6 +3874,7 @@ public:
bool switch_to_nullable_fields_processor(void *arg) override;
bool update_vcol_processor(void *arg) override;
bool rename_fields_processor(void *arg) override;
bool rename_table_processor(void *arg) override;
bool check_vcol_func_processor(void *arg) override;
bool set_fields_as_dependent_processor(void *arg) override
{

View file

@ -6155,7 +6155,7 @@ String *Item_func_wsrep_last_written_gtid::val_str_ascii(String *str)
{
if (gtid_str.alloc(WSREP_MAX_WSREP_SERVER_GTID_STR_LEN+1))
{
my_error(ER_OUTOFMEMORY, WSREP_MAX_WSREP_SERVER_GTID_STR_LEN);
my_error(ER_OUTOFMEMORY, MYF(0), WSREP_MAX_WSREP_SERVER_GTID_STR_LEN);
null_value= TRUE;
return 0;
}
@ -6180,7 +6180,7 @@ String *Item_func_wsrep_last_seen_gtid::val_str_ascii(String *str)
{
if (gtid_str.alloc(WSREP_MAX_WSREP_SERVER_GTID_STR_LEN+1))
{
my_error(ER_OUTOFMEMORY, WSREP_MAX_WSREP_SERVER_GTID_STR_LEN);
my_error(ER_OUTOFMEMORY, MYF(0), WSREP_MAX_WSREP_SERVER_GTID_STR_LEN);
null_value= TRUE;
return 0;
}
@ -6225,7 +6225,7 @@ longlong Item_func_wsrep_sync_wait_upto::val_int()
if (!(gtid_list= gtid_parse_string_to_list(gtid_str->ptr(), gtid_str->length(),
&count)))
{
my_error(ER_INCORRECT_GTID_STATE, MYF(0), func_name());
my_error(ER_INCORRECT_GTID_STATE, MYF(0));
null_value= TRUE;
return 0;
}
@ -6237,12 +6237,12 @@ longlong Item_func_wsrep_sync_wait_upto::val_int()
wait_gtid_ret= wsrep_gtid_server.wait_gtid_upto(gtid_list[0].seq_no, timeout);
if ((wait_gtid_ret == ETIMEDOUT) || (wait_gtid_ret == ETIME))
{
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0), func_name());
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
ret= 0;
}
else if (wait_gtid_ret == ENOMEM)
{
my_error(ER_OUTOFMEMORY, MYF(0), func_name());
my_error(ER_OUTOFMEMORY, MYF(0), sizeof(std::pair<uint64, mysql_cond_t*>));
ret= 0;
}
}

View file

@ -1277,6 +1277,8 @@ public:
return mark_unsupported_function(fully_qualified_func_name().str, arg,
VCOL_SESSION_FUNC);
}
Item *do_get_copy(THD *thd) const override
{ return get_item_copy<Item_func_current_user>(thd, this); }
};
class Item_func_session_user :public Item_func_user

View file

@ -1243,6 +1243,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
Item *item) {
return
!item->with_sum_func() &&
!item->with_window_func() &&
/*
We can't change name of Item_field or Item_ref, because it will
prevent its correct resolving, but we should save name of
@ -3850,7 +3851,7 @@ void subselect_single_select_engine::cleanup()
DBUG_ENTER("subselect_single_select_engine::cleanup");
prepared= executed= 0;
join= 0;
result->cleanup();
result->reset_for_next_ps_execution();
select_lex->uncacheable&= ~UNCACHEABLE_DEPENDENT_INJECTED;
DBUG_VOID_RETURN;
}
@ -3860,7 +3861,7 @@ void subselect_union_engine::cleanup()
{
DBUG_ENTER("subselect_union_engine::cleanup");
unit->reinit_exec_mechanism();
result->cleanup();
result->reset_for_next_ps_execution();
unit->uncacheable&= ~UNCACHEABLE_DEPENDENT_INJECTED;
for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select())
sl->uncacheable&= ~UNCACHEABLE_DEPENDENT_INJECTED;
@ -5538,7 +5539,7 @@ void subselect_hash_sj_engine::cleanup()
}
DBUG_ASSERT(lookup_engine->engine_type() == UNIQUESUBQUERY_ENGINE);
lookup_engine->cleanup();
result->cleanup(); /* Resets the temp table as well. */
result->reset_for_next_ps_execution(); /* Resets the temp table as well. */
DBUG_ASSERT(tmp_table);
free_tmp_table(thd, tmp_table);
tmp_table= NULL;

View file

@ -540,35 +540,6 @@ void Item_sum::fix_num_length_and_dec()
max_length=float_length(decimals);
}
Item *Item_sum::get_tmp_table_item(THD *thd)
{
Item_sum* sum_item= (Item_sum *) copy_or_same(thd);
if (sum_item && sum_item->result_field) // If not a const sum func
{
Field *result_field_tmp= sum_item->result_field;
for (uint i=0 ; i < sum_item->arg_count ; i++)
{
Item *arg= sum_item->args[i];
if (!arg->const_item())
{
if (arg->type() == Item::FIELD_ITEM)
{
((Item_field*) arg)->field= result_field_tmp++;
}
else
{
auto item_field=
new (thd->mem_root) Item_field(thd, result_field_tmp++);
if (item_field)
item_field->set_refers_to_temp_table();
sum_item->args[i]= item_field;
}
}
}
}
return sum_item;
}
void Item_sum::update_used_tables ()
{

View file

@ -521,7 +521,6 @@ public:
aggregator_clear();
}
virtual void make_unique() { force_copy_fields= TRUE; }
Item *get_tmp_table_item(THD *thd) override;
virtual Field *create_tmp_field(MEM_ROOT *root, bool group, TABLE *table);
Field *create_tmp_field_ex(MEM_ROOT *root, TABLE *table, Tmp_field_src *src,
const Tmp_field_param *param) override

View file

@ -811,6 +811,7 @@ bool Log_to_csv_event_handler::
{
TABLE_LIST table_list;
TABLE *table;
const char *cause= 0;
bool result= TRUE;
bool need_close= FALSE;
bool need_pop= FALSE;
@ -844,13 +845,19 @@ bool Log_to_csv_event_handler::
need_pop= TRUE;
if (!(table= open_log_table(thd, &table_list, &open_tables_backup)))
{
cause= "can't open file";
goto err;
}
need_close= TRUE;
if (table->file->extra(HA_EXTRA_MARK_AS_LOG_TABLE) ||
table->file->ha_rnd_init_with_error(0))
{
cause= "can't initialize table handler";
goto err;
}
need_rnd_end= TRUE;
@ -869,12 +876,20 @@ bool Log_to_csv_event_handler::
/* check that all columns exist */
if (table->s->fields < 6)
{
cause= "incorrect number of fields in the log table";
goto err;
}
DBUG_ASSERT(table->field[0]->type() == MYSQL_TYPE_TIMESTAMP);
table->field[0]->store_timestamp(
hrtime_to_my_time(event_time), hrtime_sec_part(event_time));
if (table->field[0]->store_timestamp(hrtime_to_my_time(event_time),
hrtime_sec_part(event_time)))
{
cause= "Can't write data (possible incorrect log table structure)";
goto err;
}
/* do a write */
if (table->field[1]->store(user_host, user_host_len, client_cs) ||
@ -882,7 +897,10 @@ bool Log_to_csv_event_handler::
table->field[3]->store((longlong) global_system_variables.server_id,
TRUE) ||
table->field[4]->store(command_type, command_type_len, client_cs))
{
cause= "Can't write data (possible incorrect log table structure)";
goto err;
}
/*
A positive return value in store() means truncation.
@ -890,7 +908,10 @@ bool Log_to_csv_event_handler::
*/
table->field[5]->flags|= FIELDFLAG_HEX_ESCAPE;
if (table->field[5]->store(sql_text, sql_text_len, client_cs) < 0)
{
cause= "Can't write data (possible incorrect log table structure)";
goto err;
}
/* mark all fields as not null */
table->field[1]->set_notnull();
@ -906,14 +927,22 @@ bool Log_to_csv_event_handler::
}
if (table->file->ha_write_row(table->record[0]))
{
cause= "Can't write record";
goto err;
}
result= FALSE;
err:
if (result && !thd->killed)
{
const char *msg= error_handler.message();
if (!msg || !msg[0])
msg= cause;
sql_print_error("Failed to write to mysql.general_log: %s",
error_handler.message());
msg);
}
if (need_rnd_end)
{
@ -966,6 +995,8 @@ bool Log_to_csv_event_handler::
{
TABLE_LIST table_list;
TABLE *table;
const char *cause= 0;
const char *msg;
bool result= TRUE;
bool need_close= FALSE;
bool need_rnd_end= FALSE;
@ -985,13 +1016,19 @@ bool Log_to_csv_event_handler::
TL_WRITE_CONCURRENT_INSERT);
if (!(table= open_log_table(thd, &table_list, &open_tables_backup)))
{
cause= "can't open file";
goto err;
}
need_close= TRUE;
if (table->file->extra(HA_EXTRA_MARK_AS_LOG_TABLE) ||
table->file->ha_rnd_init_with_error(0))
{
cause= "can't initialize table handler";
goto err;
}
need_rnd_end= TRUE;
@ -1002,12 +1039,19 @@ bool Log_to_csv_event_handler::
/* check that all columns exist */
if (table->s->fields < 13)
{
cause= "incorrect number of fields in the log table";
goto err;
}
// It can be used in 13 places below so assign it here
cause= "Can't write data (possible incorrect log table structure)";
/* store the time and user values */
DBUG_ASSERT(table->field[0]->type() == MYSQL_TYPE_TIMESTAMP);
table->field[0]->store_timestamp(
hrtime_to_my_time(current_time), hrtime_sec_part(current_time));
if(table->field[0]->store_timestamp(hrtime_to_my_time(current_time),
hrtime_sec_part(current_time)))
goto err;
if (table->field[1]->store(user_host, user_host_len, client_cs))
goto err;
@ -1087,9 +1131,13 @@ bool Log_to_csv_event_handler::
(longlong) thd->get_stmt_da()->affected_rows() :
0, TRUE))
goto err;
cause= 0; // just for safety
if (table->file->ha_write_row(table->record[0]))
{
cause= "Can't write record";
goto err;
}
result= FALSE;
@ -1097,8 +1145,13 @@ err:
thd->pop_internal_handler();
if (result && !thd->killed)
{
msg= error_handler.message();
if (!msg || !msg[0])
msg= cause;
sql_print_error("Failed to write to mysql.slow_log: %s",
error_handler.message());
msg);
}
if (need_rnd_end)
{
@ -3720,6 +3773,9 @@ void MYSQL_BIN_LOG::init_pthread_objects()
&COND_binlog_background_thread, 0);
mysql_cond_init(key_BINLOG_COND_binlog_background_thread_end,
&COND_binlog_background_thread_end, 0);
/* Fix correct mutex order to catch violations quicker (MDEV-35197). */
mysql_mutex_record_order(&LOCK_log, &LOCK_global_system_variables);
}
@ -12311,7 +12367,8 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name,
cur_log= first_log;
for (round= 1;;)
{
while ((ev= Log_event::read_log_event(cur_log, fdle,
int error;
while ((ev= Log_event::read_log_event(cur_log, &error, fdle,
opt_master_verify_checksum))
&& ev->is_valid())
{
@ -12705,7 +12762,8 @@ MYSQL_BIN_LOG::do_binlog_recovery(const char *opt_name, bool do_xa_recovery)
return 1;
}
if ((ev= Log_event::read_log_event(&log, &fdle,
int read_error;
if ((ev= Log_event::read_log_event(&log, &read_error, &fdle,
opt_master_verify_checksum)) &&
ev->get_type_code() == FORMAT_DESCRIPTION_EVENT)
{
@ -12790,6 +12848,7 @@ binlog_checksum_update(MYSQL_THD thd, struct st_mysql_sys_var *var,
bool check_purge= false;
ulong UNINIT_VAR(prev_binlog_id);
mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_mutex_lock(mysql_bin_log.get_log_lock());
if(mysql_bin_log.is_open())
{
@ -12808,6 +12867,7 @@ binlog_checksum_update(MYSQL_THD thd, struct st_mysql_sys_var *var,
mysql_mutex_unlock(mysql_bin_log.get_log_lock());
if (check_purge)
mysql_bin_log.checkpoint_and_purge(prev_binlog_id);
mysql_mutex_lock(&LOCK_global_system_variables);
}
@ -12928,10 +12988,11 @@ get_gtid_list_event(IO_CACHE *cache, Gtid_list_log_event **out_gtid_list)
Format_description_log_event *fdle;
Log_event *ev;
const char *errormsg = NULL;
int read_error;
*out_gtid_list= NULL;
if (!(ev= Log_event::read_log_event(cache, &init_fdle,
if (!(ev= Log_event::read_log_event(cache, &read_error, &init_fdle,
opt_master_verify_checksum)) ||
ev->get_type_code() != FORMAT_DESCRIPTION_EVENT)
{
@ -12947,7 +13008,8 @@ get_gtid_list_event(IO_CACHE *cache, Gtid_list_log_event **out_gtid_list)
{
Log_event_type typ;
ev= Log_event::read_log_event(cache, fdle, opt_master_verify_checksum);
ev= Log_event::read_log_event(cache, &read_error, fdle,
opt_master_verify_checksum);
if (!ev)
{
errormsg= "Could not read GTID list event while looking for GTID "

View file

@ -874,7 +874,7 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet,
DBUG_RETURN(0);
}
Log_event* Log_event::read_log_event(IO_CACHE* file,
Log_event* Log_event::read_log_event(IO_CACHE* file, int *out_error,
const Format_description_log_event *fdle,
my_bool crc_check, my_bool print_errors,
size_t max_allowed_packet)
@ -885,6 +885,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file,
const char *error= 0;
Log_event *res= 0;
*out_error= 0;
switch (read_log_event(file, &event, fdle, BINLOG_CHECKSUM_ALG_OFF,
max_allowed_packet))
{
@ -935,14 +936,22 @@ err:
#endif
/*
The SQL slave thread will check if file->error<0 to know
The SQL slave thread will check *out_error to know
if there was an I/O error. Even if there is no "low-level" I/O errors
with 'file', any of the high-level above errors is worrying
enough to stop the SQL thread now ; as we are skipping the current event,
going on with reading and successfully executing other events can
only corrupt the slave's databases. So stop.
*/
file->error= -1;
*out_error= 1;
/*
Clear any error that might have been set in the IO_CACHE from a read
error, while we are still holding the relay log mutex (if reading from
the hot log). Otherwise the error might interfere unpredictably with
write operations to the same IO_CACHE in the IO thread.
*/
file->error= 0;
#ifndef MYSQL_CLIENT
if (!print_errors)

View file

@ -1431,18 +1431,18 @@ public:
we detect the event's type, then call the specific event's
constructor and pass description_event as an argument.
*/
static Log_event* read_log_event(IO_CACHE* file,
static Log_event* read_log_event(IO_CACHE* file, int *out_error,
const Format_description_log_event
*description_event,
my_bool crc_check, my_bool print_errors,
size_t max_allowed_packet);
static Log_event* read_log_event(IO_CACHE* file,
static Log_event* read_log_event(IO_CACHE* file, int *out_error,
const Format_description_log_event
*description_event,
my_bool crc_check, my_bool print_errors= 1)
{
return read_log_event(file, description_event, crc_check, print_errors,
get_max_packet());
return read_log_event(file, out_error, description_event, crc_check,
print_errors, get_max_packet());
}
/**
@ -3418,7 +3418,8 @@ public:
#ifdef MYSQL_SERVER
static const uint max_data_length= GTID_HEADER_LEN + 2 + sizeof(XID)
+ 1 /* flags_extra: */
+ 4 /* Extra Engines */
+ 1 /* Extra Engines */
+ 8 /* sa_seq_no */
+ 4 /* FL_EXTRA_THREAD_ID */;
Gtid_log_event(THD *thd_arg, uint64 seq_no, uint32 domain_id, bool standalone,

View file

@ -1865,8 +1865,11 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
DBUG_PRINT("query",("%s", thd->query()));
#ifdef WITH_WSREP
WSREP_DEBUG("Query_log_event thread=%llu for query=%s",
thd_get_thread_id(thd), wsrep_thd_query(thd));
if (WSREP(thd))
{
WSREP_DEBUG("Query_log_event thread=%llu for query=%s",
thd_get_thread_id(thd), wsrep_thd_query(thd));
}
#endif
if (unlikely(!(expected_error= !is_rb_alter ? error_code : 0)) ||

View file

@ -3350,10 +3350,13 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
MyFlags));
DBUG_ASSERT(str != NULL);
DBUG_ASSERT(*str != '\0');
DBUG_ASSERT(error != 0);
DBUG_ASSERT((MyFlags & ~(ME_BELL | ME_ERROR_LOG | ME_ERROR_LOG_ONLY |
ME_NOTE | ME_WARNING | ME_FATAL)) == 0);
DBUG_ASSERT(str[strlen(str)-1] != '\n');
if (MyFlags & ME_NOTE)
{
level= Sql_condition::WARN_LEVEL_NOTE;
@ -3752,22 +3755,14 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
LOCK_thd_kill here (the limit will be enforced on the next allocation).
*/
if (!mysql_mutex_trylock(&thd->LOCK_thd_kill)) {
char buf[50], *buf2;
char buf[50], buf2[256];
thd->set_killed_no_mutex(KILL_QUERY);
my_snprintf(buf, sizeof(buf), "--max-session-mem-used=%llu",
thd->variables.max_mem_used);
if ((buf2= thd->alloc(256)))
{
my_snprintf(buf2, 256,
ER_THD(thd, ER_OPTION_PREVENTS_STATEMENT), buf);
thd->set_killed_no_mutex(KILL_QUERY,
ER_OPTION_PREVENTS_STATEMENT, buf2);
}
else
{
thd->set_killed_no_mutex(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT,
"--max-session-mem-used");
}
my_snprintf(buf2, 256,
ER_THD(thd, ER_OPTION_PREVENTS_STATEMENT), buf);
thd->set_killed_no_mutex(KILL_QUERY,
ER_OPTION_PREVENTS_STATEMENT, buf2);
mysql_mutex_unlock(&thd->LOCK_thd_kill);
}
}
@ -7468,7 +7463,7 @@ static int show_max_memory_used(THD *thd, SHOW_VAR *var, void *buff,
}
static int show_binlog_space_total(THD *thd, SHOW_VAR *var, char *buff,
static int show_binlog_space_total(THD *thd, SHOW_VAR *var, void *buff,
struct system_status_var *status_var,
enum enum_var_type scope)
{
@ -7556,8 +7551,8 @@ static int show_threadpool_threads(THD *, SHOW_VAR *var, void *buff,
#endif
static int show_cached_thread_count(THD *thd, SHOW_VAR *var, char *buff,
enum enum_var_type scope)
static int show_cached_thread_count(THD *thd, SHOW_VAR *var, void *buff,
system_status_var *, enum enum_var_type scope)
{
var->type= SHOW_LONG;
var->value= buff;
@ -10064,7 +10059,7 @@ void init_server_psi_keys(void)
*/
static my_thread_id thread_id_max= UINT_MAX32;
static my_thread_id thread_id_max= MY_THREAD_ID_MAX;
#include <vector>
#include <algorithm>

View file

@ -29,10 +29,6 @@
#include "rpl_rli.h"
#include "slave.h"
#include "log_event.h"
#ifdef WITH_WSREP
#include "wsrep_mysqld.h" // wsrep_thd_is_local
#include "wsrep_trans_observer.h" // wsrep_start_trx_if_not_started
#endif
const LEX_CSTRING rpl_gtid_slave_state_table_name=
{ STRING_WITH_LEN("gtid_slave_pos") };
@ -714,23 +710,7 @@ rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
goto end;
#ifdef WITH_WSREP
/*
We should replicate local gtid_slave_pos updates to other nodes if
wsrep gtid mode is set.
In applier we should not append them to galera writeset.
*/
if (WSREP_ON_ && wsrep_gtid_mode && wsrep_thd_is_local(thd))
{
thd->wsrep_ignore_table= false;
table->file->row_logging= 1; // replication requires binary logging
if (thd->wsrep_next_trx_id() == WSREP_UNDEFINED_TRX_ID)
thd->set_query_id(next_query_id());
wsrep_start_trx_if_not_started(thd);
}
else
{
thd->wsrep_ignore_table= true;
}
thd->wsrep_ignore_table= true; // Do not replicate mysql.gtid_slave_pos table
#endif
if (!in_transaction)
@ -767,10 +747,6 @@ rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
}
end:
#ifdef WITH_WSREP
thd->wsrep_ignore_table= false;
#endif
if (table_opened)
{
if (err || (err= ha_commit_trans(thd, FALSE)))
@ -793,6 +769,10 @@ end:
mysql_mutex_unlock(&thd->LOCK_thd_data);
thd->mdl_context.rollback_to_savepoint(m_start_of_statement_svp);
}
#ifdef WITH_WSREP
thd->wsrep_ignore_table= false;
#endif
thd->lex->restore_backup_query_tables_list(&lex_backup);
thd->variables.option_bits= thd_saved_option;
thd->resume_subsequent_commits(suspended_wfc);
@ -906,25 +886,7 @@ rpl_slave_state::gtid_delete_pending(THD *thd,
return;
#ifdef WITH_WSREP
/*
We should replicate local gtid_slave_pos updates to other nodes if
wsrep gtid mode is set.
In applier we should not append them to galera writeset.
*/
if (WSREP_ON_ && wsrep_gtid_mode &&
wsrep_thd_is_local(thd) &&
thd->wsrep_cs().state() != wsrep::client_state::s_none)
{
if (thd->wsrep_trx().active() == false)
{
if (thd->wsrep_next_trx_id() == WSREP_UNDEFINED_TRX_ID)
thd->set_query_id(next_query_id());
wsrep_start_transaction(thd, thd->wsrep_next_trx_id());
}
thd->wsrep_ignore_table= false;
}
else
thd->wsrep_ignore_table= true;
thd->wsrep_ignore_table= true; // No Galera replication for mysql.gtid_pos_table
#endif
thd_saved_option= thd->variables.option_bits;

View file

@ -1379,7 +1379,9 @@ Master_info_index::get_master_info(const LEX_CSTRING *connection_name,
connection_name->str));
/* Make name lower case for comparison */
IdentBufferCasedn<MAX_CONNECTION_NAME> buff(*connection_name);
IdentBufferCasedn<MAX_CONNECTION_NAME> buff(connection_name->str ?
*connection_name :
empty_clex_str);
mi= (Master_info*) my_hash_search(&master_info_hash,
(const uchar*) buff.ptr(), buff.length());
if (!mi && warning != Sql_condition::WARN_LEVEL_NOTE)

View file

@ -1072,14 +1072,15 @@ do_retry:
/* The loop is here so we can try again the next relay log file on EOF. */
for (;;)
{
int error;
old_offset= cur_offset;
ev= Log_event::read_log_event(&rlog, description_event,
ev= Log_event::read_log_event(&rlog, &error, description_event,
opt_slave_sql_verify_checksum);
cur_offset= my_b_tell(&rlog);
if (ev)
break;
if (unlikely(rlog.error < 0))
if (unlikely(error))
{
errmsg= "slave SQL thread aborted because of I/O error";
err= 1;

View file

@ -541,12 +541,13 @@ read_relay_log_description_event(IO_CACHE *cur_log, ulonglong start_pos,
if (my_b_tell(cur_log) >= start_pos)
break;
if (!(ev= Log_event::read_log_event(cur_log, fdev,
int read_error;
if (!(ev= Log_event::read_log_event(cur_log, &read_error, fdev,
opt_slave_sql_verify_checksum)))
{
DBUG_PRINT("info",("could not read event, cur_log->error=%d",
cur_log->error));
if (cur_log->error) /* not EOF */
DBUG_PRINT("info",("could not read event, read_error=%d",
read_error));
if (read_error) /* not EOF */
{
*errmsg= "I/O error reading event at position 4";
delete fdev;

View file

@ -938,8 +938,8 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name,
sql_print_information(
"Skipping semi-sync wait for transaction at pos %s, %lu. This "
"should be because semi-sync turned off and on during the "
"lifetime of this transaction.",
trx_wait_binlog_name, trx_wait_binlog_pos););
"lifetime of this transaction.", trx_wait_binlog_name,
static_cast<unsigned long>(trx_wait_binlog_pos)););
/* The only known reason for a missing entry at this point is if
* semi-sync was turned off then on, so on debug builds, we track

View file

@ -1,4 +1,4 @@
/* Copyright 2018-2024 Codership Oy <info@codership.com>
/* Copyright 2018-2025 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -82,11 +82,12 @@ extern "C" const char *wsrep_thd_query(const THD *thd)
case SQLCOM_REVOKE:
return "REVOKE";
case SQLCOM_SET_OPTION:
if (thd->lex->definer)
return "SET PASSWORD";
return "SET OPTION";
/* fallthrough */
default:
{
return (thd->query() ? thd->query() : "NULL");
}
}
return "NULL";
}

View file

@ -7207,19 +7207,19 @@ ER_FORBID_SCHEMA_CHANGE
spa "Vd no está autorizado a cambiar el esquema de '%-.192s' a '%-.192s'"
sw "Kubadilisha schema kutoka '%-.192s' hadi '%-.192s' hairuhusiwi"
ER_ROW_IS_REFERENCED_2 23000
chi "无法删除或更新父行:外键约束失败(%.192s"
eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)"
ger "Kann Eltern-Zeile nicht löschen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)"
geo "მშობელი მწკრივის წაშლა ან განახლება შეუძლებელია: გარე გასაღების შეზღუდვა შეცდომას შეიცავს (%.192s)"
spa "No puedo borrar o actualizar una fila padre: falla una restricción de clave foránea (%.192s)"
sw "Haiwezi kufuta au kusasisha safu mlalo kuu: kizuizi cha ufunguo wa kigeni hakifaulu (%.192s)"
chi "无法删除或更新父行:外键约束失败(%s"
eng "Cannot delete or update a parent row: a foreign key constraint fails (%s)"
ger "Kann Eltern-Zeile nicht löschen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%s)"
geo "მშობელი მწკრივის წაშლა ან განახლება შეუძლებელია: გარე გასაღების შეზღუდვა შეცდომას შეიცავს (%s)"
spa "No puedo borrar o actualizar una fila padre: falla una restricción de clave foránea (%s)"
sw "Haiwezi kufuta au kusasisha safu mlalo kuu: kizuizi cha ufunguo wa kigeni hakifaulu (%s)"
ER_NO_REFERENCED_ROW_2 23000
chi "无法添加或更新子行:外键约束失败(%.192s"
eng "Cannot add or update a child row: a foreign key constraint fails (%.192s)"
ger "Kann Kind-Zeile nicht hinzufügen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)"
geo "შვილი მწკრივის წაშლა ან განახლება შეუძლებელია: გარე გასაღების შეზღუდვა შეცდომას შეიცავს (%.192s)"
spa "No puedo añadir o actualizar una fila hija: falla una restricción de clave foránea (%.192s)"
sw "Haiwezi kuongeza au kusasisha safu mlalo ya mtoto: kizuizi cha ufunguo wa kigeni hakifaulu (%.192s)"
chi "无法添加或更新子行:外键约束失败(%s"
eng "Cannot add or update a child row: a foreign key constraint fails (%s)"
ger "Kann Kind-Zeile nicht hinzufügen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%s)"
geo "შვილი მწკრივის წაშლა ან განახლება შეუძლებელია: გარე გასაღების შეზღუდვა შეცდომას შეიცავს (%s)"
spa "No puedo añadir o actualizar una fila hija: falla una restricción de clave foránea (%s)"
sw "Haiwezi kuongeza au kusasisha safu mlalo ya mtoto: kizuizi cha ufunguo wa kigeni hakifaulu (%s)"
ER_SP_BAD_VAR_SHADOW 42000
chi "变量'%-.64s'必须用`...`,或重命名"
eng "Variable '%-.64s' must be quoted with `...`, or renamed"

View file

@ -7228,7 +7228,8 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
MYSQL_BIN_LOG::open() will write the buffered description event.
*/
old_pos= rli->event_relay_log_pos;
if ((ev= Log_event::read_log_event(cur_log,
int error;
if ((ev= Log_event::read_log_event(cur_log, &error,
rli->relay_log.description_event_for_exec,
opt_slave_sql_verify_checksum)))
@ -7245,8 +7246,8 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
DBUG_RETURN(ev);
}
if (opt_reckless_slave) // For mysql-test
cur_log->error = 0;
if (unlikely(cur_log->error < 0))
error = 0;
if (unlikely(error))
{
errmsg = "slave SQL thread aborted because of I/O error";
if (hot_log)

View file

@ -281,6 +281,7 @@ public:
m_lex_resp= false;
/* Prevent endless recursion. */
m_lex->sphead= nullptr;
delete m_lex->result;
lex_end(m_lex);
delete m_lex;
}

View file

@ -54,6 +54,7 @@
#include "sql_array.h"
#include "sql_hset.h"
#include "password.h"
#include "scope.h"
#include "sql_plugin_compat.h"
#include "wsrep_mysqld.h"
@ -2646,10 +2647,9 @@ static LEX_STRING make_and_check_db_name(MEM_ROOT *mem_root,
static bool acl_load(THD *thd, const Grant_tables& tables)
{
READ_RECORD read_record_info;
Sql_mode_save old_mode_save(thd);
DBUG_ENTER("acl_load");
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
SCOPE_CLEAR(thd->variables.sql_mode, MODE_PAD_CHAR_TO_FULL_LENGTH);
grant_version++; /* Privileges updated */
@ -3467,7 +3467,7 @@ end:
switch (result)
{
case ER_INVALID_CURRENT_USER:
my_error(ER_INVALID_CURRENT_USER, MYF(0), rolename.str);
my_error(ER_INVALID_CURRENT_USER, MYF(0));
break;
case ER_INVALID_ROLE:
/* Role doesn't exist at all */
@ -8529,7 +8529,8 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables,
if (access)
{
switch(access->check(orig_want_access, &t_ref->grant.privilege))
switch(access->check(orig_want_access, &t_ref->grant.privilege,
any_combination_will_do))
{
case ACL_INTERNAL_ACCESS_GRANTED:
t_ref->grant.privilege|= orig_want_access;

View file

@ -213,7 +213,7 @@ public:
in save_priv.
*/
virtual ACL_internal_access_result check(privilege_t want_access,
privilege_t *save_priv) const= 0;
privilege_t *save_priv, bool any_combination_will_do) const= 0;
};
/**

View file

@ -636,7 +636,7 @@ bool Sql_cmd_alter_table::execute(THD *thd)
DBUG_RETURN(TRUE); /* purecov: inspected */
#ifdef WITH_WSREP
if (WSREP(thd) &&
if (WSREP(thd) && wsrep_thd_is_local(thd) &&
(!thd->is_current_stmt_binlog_format_row() ||
!thd->find_temporary_table(first_table)))
{

View file

@ -439,6 +439,7 @@ public:
/** Name of table for the above error. */
const char *fk_error_table= nullptr;
bool modified_primary_key= false;
bool fast_alter_partition= false;
/** Indicates that we are altering temporary table */
bool tmp_table= false;

View file

@ -784,6 +784,22 @@ close_all_tables_for_name(THD *thd, TABLE_SHARE *share,
}
}
static inline bool check_field_pointers(const TABLE *table)
{
for (Field **pf= table->field; *pf; pf++)
{
Field *f= *pf;
if (f->ptr < table->record[0] || f->ptr > table->record[0]
+ table->s->reclength)
return false;
if (f->null_ptr &&
(f->null_ptr < table->record[0] || f->null_ptr > table->record[0]
+ table->s->reclength))
return false;
}
return true;
}
int close_thread_tables_for_query(THD *thd)
{
@ -847,6 +863,8 @@ int close_thread_tables(THD *thd)
DBUG_PRINT("tcache", ("table: '%s' query_id: %lu",
table->s->table_name.str, (ulong) table->query_id));
DBUG_SLOW_ASSERT(check_field_pointers(table));
if (thd->locked_tables_mode)
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
@ -8927,6 +8945,23 @@ static bool vers_update_or_validate_fields(TABLE *table)
}
static void unwind_stored_field_offsets(const List<Item> &fields, Field *end)
{
for (Item &item_field: fields)
{
Field *f= item_field.field_for_view_update()->field;
if (f == end)
break;
if (f->stored_in_db())
{
TABLE *table= f->table;
f->move_field_offset((my_ptrdiff_t) (table->record[0] -
table->record[1]));
}
}
}
/******************************************************************************
** Fill a record with data (for INSERT or UPDATE)
** Returns : 1 if some field has wrong type
@ -8982,7 +9017,7 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
if (!(field= fld->field_for_view_update()))
{
my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name.str);
goto err;
goto err_unwind_fields;
}
value=v++;
DBUG_ASSERT(value);
@ -9014,7 +9049,7 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
if (value->save_in_field(rfield, 0) < 0 && !ignore_errors)
{
my_message(ER_UNKNOWN_ERROR, ER_THD(thd, ER_UNKNOWN_ERROR), MYF(0));
goto err;
goto err_unwind_fields;
}
rfield->set_has_explicit_value();
}
@ -9030,20 +9065,7 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
}
if (update && thd->variables.sql_mode & MODE_SIMULTANEOUS_ASSIGNMENT)
{
// restore fields pointers on record[0]
f.rewind();
while ((fld= f++))
{
rfield= fld->field_for_view_update()->field;
if (rfield->stored_in_db())
{
table= rfield->table;
rfield->move_field_offset((my_ptrdiff_t) (table->record[0] -
table->record[1]));
}
}
}
unwind_stored_field_offsets(fields, NULL);
if (update)
table_arg->evaluate_update_default_function();
@ -9062,6 +9084,9 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
thd->abort_on_warning= save_abort_on_warning;
thd->no_errors= save_no_errors;
DBUG_RETURN(thd->is_error());
err_unwind_fields:
if (update && thd->variables.sql_mode & MODE_SIMULTANEOUS_ASSIGNMENT)
unwind_stored_field_offsets(fields, rfield);
err:
DBUG_PRINT("error",("got error"));
thd->abort_on_warning= save_abort_on_warning;
@ -9090,7 +9115,6 @@ void switch_to_nullable_trigger_fields(List<Item> &items, TABLE *table)
while ((item= it++))
item->walk(&Item::switch_to_nullable_fields_processor, 1, field);
table->triggers->reset_extra_null_bitmap();
}
}
@ -9144,8 +9168,14 @@ static bool not_null_fields_have_null_values(TABLE *table)
swap_variables(uint32, of->flags, ff->flags);
if (ff->is_real_null())
{
uint err= ER_BAD_NULL_ERROR;
if (ff->flags & NO_DEFAULT_VALUE_FLAG && !ff->has_explicit_value())
{
err= ER_NO_DEFAULT_FOR_FIELD;
table->in_use->count_cuted_fields= CHECK_FIELD_WARN;
}
ff->set_notnull(); // for next row WHERE condition in UPDATE
if (convert_null_to_field_value_or_error(of) || thd->is_error())
if (convert_null_to_field_value_or_error(of, err) || thd->is_error())
return true;
}
}
@ -9425,8 +9455,9 @@ my_bool mysql_rm_tmp_tables(void)
memcpy(path_copy, path, path_len - ext_len);
path_copy[path_len - ext_len]= 0;
init_tmp_table_share(thd, &share, "", 0, "", path_copy, true);
handlerton *ht= share.db_type();
if (!open_table_def(thd, &share))
share.db_type()->drop_table(share.db_type(), path_copy);
ht->drop_table(share.db_type(), path_copy);
free_table_share(&share);
}
/*

View file

@ -169,7 +169,8 @@ Key::Key(const Key &rhs, MEM_ROOT *mem_root)
name(rhs.name),
option_list(rhs.option_list),
generated(rhs.generated), invisible(false),
without_overlaps(rhs.without_overlaps), old(rhs.old), period(rhs.period)
without_overlaps(rhs.without_overlaps), old(rhs.old), length(rhs.length),
period(rhs.period)
{
list_copy_and_replace_each_value(columns, mem_root);
}
@ -196,7 +197,7 @@ Foreign_key::Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root)
/*
Test if a foreign key (= generated key) is a prefix of the given key
(ignoring key name, key type and order of columns)
(ignoring key name and type, but minding the algorithm)
NOTES:
This is only used to test if an index for a FOREIGN KEY exists
@ -211,6 +212,16 @@ Foreign_key::Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root)
bool is_foreign_key_prefix(Key *a, Key *b)
{
ha_key_alg a_alg= a->key_create_info.algorithm;
ha_key_alg b_alg= b->key_create_info.algorithm;
// The real algorithm in InnoDB will be BTREE if none was given by user.
a_alg= a_alg == HA_KEY_ALG_UNDEF ? HA_KEY_ALG_BTREE : a_alg;
b_alg= b_alg == HA_KEY_ALG_UNDEF ? HA_KEY_ALG_BTREE : b_alg;
if (a_alg != b_alg)
return false;
/* Ensure that 'a' is the generated key */
if (a->generated)
{
@ -3187,7 +3198,7 @@ void Item_change_list::rollback_item_tree_changes()
** Functions to provide a interface to select results
*****************************************************************************/
void select_result::cleanup()
void select_result::reset_for_next_ps_execution()
{
/* do nothing */
}
@ -3256,6 +3267,7 @@ void select_send::abort_result_set()
*/
thd->spcont->end_partial_result_set= TRUE;
}
reset_for_next_ps_execution();
DBUG_VOID_RETURN;
}
@ -3266,7 +3278,7 @@ void select_send::abort_result_set()
stored procedure statement.
*/
void select_send::cleanup()
void select_send::reset_for_next_ps_execution()
{
is_result_set_started= FALSE;
}
@ -3304,7 +3316,7 @@ bool select_send::send_eof()
if (unlikely(thd->is_error()))
return TRUE;
::my_eof(thd);
is_result_set_started= 0;
reset_for_next_ps_execution();
return FALSE;
}
@ -3313,10 +3325,22 @@ bool select_send::send_eof()
Handling writing to file
************************************************************************/
bool select_to_file::free_recources()
{
if (file >= 0)
{
(void) end_io_cache(&cache);
bool error= mysql_file_close(file, MYF(MY_WME));
file= -1;
return error;
}
return FALSE;
}
bool select_to_file::send_eof()
{
int error= MY_TEST(end_io_cache(&cache));
if (unlikely(mysql_file_close(file, MYF(MY_WME))) ||
int error= false;
if (unlikely(free_recources()) ||
unlikely(thd->is_error()))
error= true;
@ -3324,20 +3348,19 @@ bool select_to_file::send_eof()
{
::my_ok(thd,row_count);
}
file= -1;
return error;
}
void select_to_file::abort_result_set()
{
select_result_interceptor::abort_result_set();
free_recources();
}
void select_to_file::cleanup()
void select_to_file::reset_for_next_ps_execution()
{
/* In case of error send_eof() may be not called: close the file here. */
if (file >= 0)
{
(void) end_io_cache(&cache);
mysql_file_close(file, MYF(0));
file= -1;
}
free_recources();
path[0]= '\0';
row_count= 0;
}
@ -3345,12 +3368,8 @@ void select_to_file::cleanup()
select_to_file::~select_to_file()
{
if (file >= 0)
{ // This only happens in case of error
(void) end_io_cache(&cache);
mysql_file_close(file, MYF(0));
file= -1;
}
DBUG_ASSERT(file < 0);
free_recources(); // just in case
}
/***************************************************************************
@ -3840,9 +3859,9 @@ int select_singlerow_subselect::send_data(List<Item> &items)
}
void select_max_min_finder_subselect::cleanup()
void select_max_min_finder_subselect::reset_for_next_ps_execution()
{
DBUG_ENTER("select_max_min_finder_subselect::cleanup");
DBUG_ENTER("select_max_min_finder_subselect::reset_for_next_ps_execution");
cache= 0;
DBUG_VOID_RETURN;
}
@ -4067,7 +4086,7 @@ bool select_dumpvar::check_simple_select() const
}
void select_dumpvar::cleanup()
void select_dumpvar::reset_for_next_ps_execution()
{
row_count= 0;
}
@ -4540,10 +4559,10 @@ void select_materialize_with_stats::reset()
}
void select_materialize_with_stats::cleanup()
void select_materialize_with_stats::reset_for_next_ps_execution()
{
reset();
select_unit::cleanup();
select_unit::reset_for_next_ps_execution();
}

View file

@ -52,6 +52,7 @@
#include "session_tracker.h"
#include "backup.h"
#include "xa.h"
#include "scope.h"
#include "ddl_log.h" /* DDL_LOG_STATE */
#include "ha_handler_stats.h" // ha_handler_stats */
@ -473,6 +474,7 @@ public:
bool invisible;
bool without_overlaps;
bool old;
uint length;
Lex_ident_column period;
Key(enum Keytype type_par, const LEX_CSTRING *name_arg,
@ -480,7 +482,7 @@ public:
:DDL_options(ddl_options),
type(type_par), key_create_info(default_key_create_info),
name(*name_arg), option_list(NULL), generated(generated_arg),
invisible(false), without_overlaps(false), old(false)
invisible(false), without_overlaps(false), old(false), length(0)
{
key_create_info.algorithm= algorithm_arg;
}
@ -491,7 +493,7 @@ public:
:DDL_options(ddl_options),
type(type_par), key_create_info(*key_info_arg), columns(*cols),
name(*name_arg), option_list(create_opt), generated(generated_arg),
invisible(false), without_overlaps(false), old(false)
invisible(false), without_overlaps(false), old(false), length(0)
{}
Key(const Key &rhs, MEM_ROOT *mem_root);
virtual ~Key() = default;
@ -6248,7 +6250,8 @@ public:
*/
virtual int send_data(List<Item> &items)=0;
virtual ~select_result_sink() = default;
void reset(THD *thd_arg) { thd= thd_arg; }
// Used in cursors to initialize and reset
void reinit(THD *thd_arg) { thd= thd_arg; }
};
class select_result_interceptor;
@ -6322,15 +6325,11 @@ public:
*/
virtual bool check_simple_select() const;
virtual void abort_result_set() {}
/*
Cleanup instance of this class for next execution of a prepared
statement/stored procedure.
*/
virtual void cleanup();
virtual void reset_for_next_ps_execution();
void set_thd(THD *thd_arg) { thd= thd_arg; }
void reset(THD *thd_arg)
void reinit(THD *thd_arg)
{
select_result_sink::reset(thd_arg);
select_result_sink::reinit(thd_arg);
unit= NULL;
}
#ifdef EMBEDDED_LIBRARY
@ -6436,9 +6435,9 @@ public:
elsewhere. (this is used by ANALYZE $stmt feature).
*/
void disable_my_ok_calls() { suppress_my_ok= true; }
void reset(THD *thd_arg)
void reinit(THD *thd_arg)
{
select_result::reset(thd_arg);
select_result::reinit(thd_arg);
suppress_my_ok= false;
}
protected:
@ -6492,7 +6491,7 @@ private:
{}
void reset(THD *thd_arg)
{
select_result_interceptor::reset(thd_arg);
select_result_interceptor::reinit(thd_arg);
spvar_list= NULL;
field_count= 0;
}
@ -6534,7 +6533,7 @@ public:
void reset(THD *thd_arg)
{
sp_cursor_statistics::reset();
result.reset(thd_arg);
result.reinit(thd_arg);
server_side_cursor= NULL;
}
@ -6561,7 +6560,7 @@ public:
bool send_eof() override;
bool check_simple_select() const override { return FALSE; }
void abort_result_set() override;
void cleanup() override;
void reset_for_next_ps_execution() override;
select_result_interceptor *result_interceptor() override { return NULL; }
};
@ -6596,7 +6595,9 @@ public:
{ path[0]=0; }
~select_to_file();
bool send_eof() override;
void cleanup() override;
void abort_result_set() override;
void reset_for_next_ps_execution() override;
bool free_recources();
};
@ -6673,7 +6674,7 @@ class select_insert :public select_result_interceptor {
bool send_eof() override;
void abort_result_set() override;
/* not implemented: select_insert is never re-used in prepared statements */
void cleanup() override;
void reset_for_next_ps_execution() override;
};
@ -6900,7 +6901,7 @@ public:
int delete_record();
bool send_eof() override;
virtual bool flush();
void cleanup() override;
void reset_for_next_ps_execution() override;
virtual bool create_result_table(THD *thd, List<Item> *column_types,
bool is_distinct, ulonglong options,
const LEX_CSTRING *alias,
@ -7075,9 +7076,10 @@ class select_union_recursive :public select_unit
*/
List<TABLE_LIST> rec_table_refs;
/*
The count of how many times cleanup() was called with cleaned==false
for the unit specifying the recursive CTE for which this object was created
or for the unit specifying a CTE that mutually recursive with this CTE.
The count of how many times reset_for_next_ps_execution() was called with
cleaned==false for the unit specifying the recursive CTE for which this
object was created or for the unit specifying a CTE that mutually
recursive with this CTE.
*/
uint cleanup_count;
long row_counter;
@ -7096,7 +7098,7 @@ class select_union_recursive :public select_unit
bool create_table,
bool keep_row_order,
uint hidden) override;
void cleanup() override;
void reset_for_next_ps_execution() override;
};
/**
@ -7166,7 +7168,7 @@ public:
{
result->abort_result_set(); /* purecov: inspected */
}
void cleanup() override
void reset_for_next_ps_execution() override
{
send_records= 0;
}
@ -7269,7 +7271,7 @@ public:
uint hidden) override;
bool init_result_table(ulonglong select_options);
int send_data(List<Item> &items) override;
void cleanup() override;
void reset_for_next_ps_execution() override;
ha_rows get_null_count_of_col(uint idx)
{
DBUG_ASSERT(idx < table->s->fields);
@ -7302,7 +7304,7 @@ public:
bool mx, bool all):
select_subselect(thd_arg, item_arg), cache(0), fmax(mx), is_all(all)
{}
void cleanup() override;
void reset_for_next_ps_execution() override;
int send_data(List<Item> &items) override;
bool cmp_real();
bool cmp_int();
@ -7732,7 +7734,7 @@ public:
int send_data(List<Item> &items) override;
bool send_eof() override;
bool check_simple_select() const override;
void cleanup() override;
void reset_for_next_ps_execution() override;
};
/* Bits in sql_command_flags */

View file

@ -773,8 +773,8 @@ bool Sql_cmd_delete::delete_from_single_table(THD *thd)
table->mark_columns_needed_for_delete();
}
if ((table->file->ha_table_flags() & HA_CAN_FORCE_BULK_DELETE) &&
!table->prepare_triggers_for_delete_stmt_or_event())
if (!table->prepare_triggers_for_delete_stmt_or_event() &&
table->file->ha_table_flags() & HA_CAN_FORCE_BULK_DELETE)
will_batch= !table->file->start_bulk_delete();
/*
@ -806,27 +806,21 @@ bool Sql_cmd_delete::delete_from_single_table(THD *thd)
MEM_STRIP_BUF_SIZE);
THD_STAGE_INFO(thd, stage_searching_rows_for_update);
while (!(error=info.read_record()) && !thd->killed &&
! thd->is_error())
while (!(error=info.read_record()) && !thd->killed && !thd->is_error())
{
if (record_should_be_deleted(thd, table, select, explain, delete_history))
{
table->file->position(table->record[0]);
if (unlikely((error=
deltempfile->unique_add((char*) table->file->ref))))
{
error= 1;
goto terminate_delete;
}
if ((error= deltempfile->unique_add((char*) table->file->ref)))
break;
if (!--tmplimit && using_limit)
break;
}
}
end_read_record(&info);
if (unlikely(deltempfile->get(table)) ||
unlikely(table->file->ha_index_or_rnd_end()) ||
unlikely(init_read_record(&info, thd, table, 0, &deltempfile->sort, 0,
1, false)))
if (table->file->ha_index_or_rnd_end() || error > 0 ||
deltempfile->get(table) ||
init_read_record(&info, thd, table, 0, &deltempfile->sort, 0, 1, 0))
{
error= 1;
goto terminate_delete;
@ -1300,6 +1294,13 @@ void multi_delete::abort_result_set()
{
DBUG_ENTER("multi_delete::abort_result_set");
/****************************************************************************
NOTE: if you change here be aware that almost the same code is in
multi_delete::send_eof().
***************************************************************************/
/* the error was handled or nothing deleted and no side effects return */
if (error_handled ||
(!thd->transaction->stmt.modified_non_trans_table && !deleted))
@ -1502,6 +1503,13 @@ bool multi_delete::send_eof()
/* reset used flags */
THD_STAGE_INFO(thd, stage_end);
/****************************************************************************
NOTE: if you change here be aware that almost the same code is in
multi_delete::abort_result_set().
***************************************************************************/
if (thd->transaction->stmt.modified_non_trans_table)
thd->transaction->all.modified_non_trans_table= TRUE;
thd->transaction->all.m_unsafe_rollback_flags|=

View file

@ -693,7 +693,6 @@ Sql_condition *Warning_info::push_warning(THD *thd,
ulong current_row_number)
{
Sql_condition *cond= NULL;
DBUG_ASSERT(msg[strlen(msg)-1] != '\n');
if (! m_read_only)
{
@ -753,6 +752,7 @@ void push_warning(THD *thd, Sql_condition::enum_warning_level level,
if (level == Sql_condition::WARN_LEVEL_ERROR)
level= Sql_condition::WARN_LEVEL_WARN;
DBUG_ASSERT(msg[strlen(msg)-1] != '\n');
(void) thd->raise_condition(code, "\0\0\0\0\0", level, msg);
/* Make sure we also count warnings pushed after calling set_ok_status(). */

View file

@ -990,8 +990,7 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
if (fields.elements || !value_count || table_list->view != 0)
{
if (table->triggers &&
table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE))
if (table->field != table->field_to_fill())
{
/* BEFORE INSERT triggers exist, the check will be done later, per row */
}
@ -1065,7 +1064,7 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
INSERT INTO t1 (fields) VALUES ...
INSERT INTO t1 VALUES ()
*/
restore_record(table,s->default_values); // Get empty record
restore_default_record_for_insert(table);
table->reset_default_fields();
if (unlikely(fill_record_n_invoke_before_triggers(thd, table, fields,
@ -1094,7 +1093,7 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
*/
if (thd->lex->used_tables || // Column used in values()
table->s->visible_fields != table->s->fields)
restore_record(table,s->default_values); // Get empty record
restore_default_record_for_insert(table);
else
{
TABLE_SHARE *share= table->s;
@ -1131,24 +1130,6 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
}
}
/*
with triggers a field can get a value *conditionally*, so we have to
repeat has_no_default_value() check for every row
*/
if (table->triggers &&
table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE))
{
for (Field **f=table->field ; *f ; f++)
{
if (unlikely(!(*f)->has_explicit_value() &&
has_no_default_value(thd, *f, table_list)))
{
error= 1;
goto values_loop_end;
}
}
}
if ((res= table_list->view_check_option(thd, ignore)) == VIEW_CHECK_SKIP)
continue;
else if (res == VIEW_CHECK_ERROR)
@ -2219,6 +2200,9 @@ int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink)
!table->file->referenced_by_foreign_key() &&
(!table->triggers || !table->triggers->has_delete_triggers()))
{
/*
Optimized dup handling via UPDATE (and insert history for versioned).
*/
if (table->versioned(VERS_TRX_ID))
{
DBUG_ASSERT(table->vers_write);
@ -2254,25 +2238,39 @@ int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink)
}
else
{
/*
Normal dup handling via DELETE (or UPDATE to history for versioned)
and repeating the cycle of INSERT.
*/
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
TRG_ACTION_BEFORE, TRUE))
goto before_trg_err;
if (!table->versioned(VERS_TIMESTAMP))
bool do_delete= !table->versioned(VERS_TIMESTAMP);
if (do_delete)
error= table->file->ha_delete_row(table->record[1]);
else
{
/* Update existing row to history */
store_record(table, record[2]);
restore_record(table, record[1]);
table->vers_update_end();
error= table->file->ha_update_row(table->record[1],
table->record[0]);
restore_record(table, record[2]);
if (error == HA_ERR_FOUND_DUPP_KEY || /* Unique index, any SE */
error == HA_ERR_FOREIGN_DUPLICATE_KEY || /* Unique index, InnoDB */
error == HA_ERR_RECORD_IS_THE_SAME) /* No index */
{
/* Such history row was already generated from previous cycles */
error= table->file->ha_delete_row(table->record[1]);
do_delete= true;
}
}
if (unlikely(error))
goto err;
if (!table->versioned(VERS_TIMESTAMP))
if (do_delete)
info->deleted++;
else
info->updated++;
@ -2520,6 +2518,7 @@ public:
delayed_insert_threads--;
my_free(thd.query());
thd.reset_query_inner();
thd.security_ctx->user= 0;
thd.security_ctx->host= 0;
}
@ -2772,12 +2771,21 @@ end_create:
DBUG_RETURN(thd->is_error());
}
#define memdup_vcol(thd, vcol) \
if (vcol) \
{ \
(vcol)= (Virtual_column_info*)(thd)->memdup((vcol), sizeof(*(vcol))); \
(vcol)->expr= NULL; \
static inline
bool memdup_vcol(THD *thd, Virtual_column_info *&vcol)
{
if (vcol)
{
vcol= (Virtual_column_info*)(thd->memdup(vcol, sizeof(*vcol)));
if (!vcol)
{
my_error(ER_OUT_OF_RESOURCES, MYF(0));
return true;
}
vcol->expr= NULL;
}
return false;
}
/**
As we can't let many client threads modify the same TABLE
@ -2921,9 +2929,12 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
(*field)->move_field_offset(adjust_ptrs); // Point at copy->record[0]
(*field)->flags|= ((*org_field)->flags & LONG_UNIQUE_HASH_FIELD);
(*field)->invisible= (*org_field)->invisible;
memdup_vcol(client_thd, (*field)->vcol_info);
memdup_vcol(client_thd, (*field)->default_value);
memdup_vcol(client_thd, (*field)->check_constraint);
if (memdup_vcol(client_thd, (*field)->vcol_info))
goto error;
if (memdup_vcol(client_thd, (*field)->default_value))
goto error;
if (memdup_vcol(client_thd, (*field)->check_constraint))
goto error;
if (*org_field == found_next_number_field)
(*field)->table->found_next_number_field= *field;
}
@ -2940,6 +2951,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
&error_reported,
VCOL_INIT_DEPENDENCY_FAILURE_IS_WARNING)))
goto error;
copy->update_keypart_vcol_info();
}
switch_defaults_to_nullable_trigger_fields(copy);
@ -4172,7 +4185,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
*/
table->file->ha_start_bulk_insert((ha_rows) 0);
}
restore_record(table,s->default_values); // Get empty record
restore_default_record_for_insert(table);
table->reset_default_fields();
table->next_number_field=table->found_next_number_field;
@ -4253,7 +4266,7 @@ int select_insert::prepare2(JOIN *)
}
void select_insert::cleanup()
void select_insert::reset_for_next_ps_execution()
{
/* select_insert/select_create are never re-used in prepared statement */
DBUG_ASSERT(0);
@ -4315,7 +4328,7 @@ int select_insert::send_data(List<Item> &values)
originally touched by INSERT ... SELECT, so we have to restore
their original values for the next row.
*/
restore_record(table, s->default_values);
restore_default_record_for_insert(table);
}
if (table->next_number_field)
{
@ -4365,6 +4378,13 @@ bool select_insert::prepare_eof()
DBUG_PRINT("enter", ("trans_table: %d, table_type: '%s'",
trans_table, table->file->table_type()));
/****************************************************************************
NOTE: if you change here be aware that almost the same code is in
select_insert::abort_result_set().
****************************************************************************/
error= IF_WSREP(thd->wsrep_cs().current_error(), 0) ? -1 :
(thd->locked_tables_mode <= LTM_LOCK_TABLES) ?
table->file->ha_end_bulk_insert() : 0;
@ -4506,6 +4526,12 @@ void select_insert::abort_result_set()
*/
if (table && table->file->is_open())
{
/****************************************************************************
NOTE: if you change here be aware that almost the same code is in
select_insert::prepare_eof().
****************************************************************************/
bool changed, transactional_table;
/*
If we are not in prelocked mode, we end the bulk insert started
@ -4533,7 +4559,14 @@ void select_insert::abort_result_set()
If table creation failed, the number of rows modified will also be
zero, so no check for that is made.
*/
changed= (info.copied || info.deleted || info.updated);
if ((changed= (info.copied || info.deleted || info.updated)))
{
/*
We must invalidate the table in the query cache before binlog writing
and ha_autocommit_or_rollback.
*/
query_cache_invalidate3(thd, table, 1);
}
transactional_table= table->file->has_transactions_and_rollback();
if (thd->transaction->stmt.modified_non_trans_table ||
thd->log_current_statement())

View file

@ -47,6 +47,13 @@ void kill_delayed_threads(void);
bool binlog_create_table(THD *thd, TABLE *table, bool replace);
bool binlog_drop_table(THD *thd, TABLE *table);
static inline void restore_default_record_for_insert(TABLE *t)
{
restore_record(t,s->default_values);
if (t->triggers)
t->triggers->default_extra_null_bitmap();
}
#ifdef EMBEDDED_LIBRARY
inline void kill_delayed_threads(void) {}
#endif

View file

@ -4616,7 +4616,11 @@ public:
case SQLCOM_LOAD:
return duplicates == DUP_REPLACE;
default:
return false;
/*
Row injections (i.e. row binlog events and BINLOG statements) should
generate history.
*/
return is_stmt_row_injection();
}
}

View file

@ -23,7 +23,6 @@
#include "sql_priv.h"
#include "unireg.h"
#include "sql_load.h"
#include "sql_load.h"
#include "sql_cache.h" // query_cache_*
#include "sql_base.h" // fill_record_n_invoke_before_triggers
#include <my_dir.h>
@ -725,7 +724,15 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
table->file->print_error(my_errno, MYF(0));
error= 1;
}
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
if (!error)
{
int err= table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
if (err == HA_ERR_FOUND_DUPP_KEY)
{
error= 1;
my_error(ER_ERROR_DURING_COMMIT, MYF(0), 1);
}
}
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
table->next_number_field=0;
}
@ -1057,8 +1064,7 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
read_info.row_end[0]=0;
#endif
restore_record(table, s->default_values);
restore_default_record_for_insert(table);
while ((item= it++))
{
Load_data_outvar *dst= item->get_load_data_outvar();
@ -1171,8 +1177,8 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->progress.max_counter);
}
}
restore_record(table, s->default_values);
restore_default_record_for_insert(table);
while ((item= it++))
{
uint length;
@ -1326,8 +1332,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
}
#endif
restore_record(table, s->default_values);
restore_default_record_for_insert(table);
while ((item= it++))
{
/* If this line is to be skipped we don't want to fill field or var */

View file

@ -439,7 +439,6 @@ bool stmt_causes_implicit_commit(THD *thd, uint mask)
case SQLCOM_DROP_TABLE:
case SQLCOM_DROP_SEQUENCE:
case SQLCOM_CREATE_TABLE:
case SQLCOM_CREATE_SEQUENCE:
/*
If CREATE TABLE of non-temporary table and the table is not part
if a BEGIN GTID ... COMMIT group, do a implicit commit.
@ -4778,17 +4777,18 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
lex->create_info.set(DDL_options_st::OPT_IF_EXISTS);
#ifdef WITH_WSREP
if (WSREP(thd))
if (WSREP(thd) && !lex->tmp_table() && wsrep_thd_is_local(thd) &&
(!thd->is_current_stmt_binlog_format_row() ||
wsrep_table_list_has_non_temp_tables(thd, all_tables)))
{
for (TABLE_LIST *table= all_tables; table; table= table->next_global)
wsrep::key_array keys;
if (wsrep_append_fk_parent_table(thd, all_tables, &keys))
{
if (!lex->tmp_table() &&
(!thd->is_current_stmt_binlog_format_row() ||
!is_temporary_table(table)))
{
WSREP_TO_ISOLATION_BEGIN(NULL, NULL, all_tables);
break;
}
goto wsrep_error_label;
}
if (wsrep_to_isolation_begin(thd, NULL, NULL, all_tables, NULL, &keys))
{
goto wsrep_error_label;
}
}
#endif /* WITH_WSREP */

View file

@ -712,9 +712,11 @@ static bool handle_list_of_fields(THD *thd, List_iterator<const char> it,
}
else
{
if (table->s->db_type()->partition_flags &&
(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) &&
(table->s->db_type()->partition_flags() & HA_CAN_PARTITION))
handlerton *ht= table->s->db_type();
if (ht->partition_flags &&
((ht->partition_flags() &
(HA_USE_AUTO_PARTITION | HA_CAN_PARTITION)) ==
(HA_USE_AUTO_PARTITION | HA_CAN_PARTITION)))
{
/*
This engine can handle automatic partitioning and there is no
@ -1917,6 +1919,7 @@ bool fix_partition_func(THD *thd, TABLE *table, bool is_create_table_ind)
bool result= TRUE;
partition_info *part_info= table->part_info;
enum_column_usage saved_column_usage= thd->column_usage;
handlerton *ht;
DBUG_ENTER("fix_partition_func");
if (part_info->fixed)
@ -2046,8 +2049,9 @@ bool fix_partition_func(THD *thd, TABLE *table, bool is_create_table_ind)
goto end;
if (unlikely(check_primary_key(table)))
goto end;
if (unlikely((!(table->s->db_type()->partition_flags &&
(table->s->db_type()->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
ht= table->s->db_type();
if (unlikely((!(ht->partition_flags &&
(ht->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
check_unique_keys(table)))
goto end;
if (unlikely(set_up_partition_bitmaps(thd, part_info)))
@ -2768,12 +2772,14 @@ bool partition_key_modified(TABLE *table, const MY_BITMAP *fields)
{
Field **fld;
partition_info *part_info= table->part_info;
handlerton *ht;
DBUG_ENTER("partition_key_modified");
if (!part_info)
DBUG_RETURN(FALSE);
if (table->s->db_type()->partition_flags &&
(table->s->db_type()->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY))
ht= table->s->db_type();
if (ht->partition_flags &&
(ht->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY))
DBUG_RETURN(FALSE);
for (fld= part_info->full_part_field_array; *fld; fld++)
if (bitmap_is_set(fields, (*fld)->field_index))
@ -4990,11 +4996,10 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
if default partitioning is used.
*/
handlerton *ht= table->s->db_type();
if (tab_part_info->part_type != HASH_PARTITION ||
((table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) &&
!tab_part_info->use_default_num_partitions) ||
((!(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)) &&
tab_part_info->use_default_num_partitions))
!(ht->partition_flags() & HA_USE_AUTO_PARTITION) ==
tab_part_info->use_default_num_partitions)
{
my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
goto err;

View file

@ -241,6 +241,8 @@ bool compare_table_with_partition(THD *thd, TABLE *table, TABLE *part_table,
part_create_info.row_type= table->s->row_type;
}
part_create_info.table= part_table;
/*
NOTE: ha_blackhole does not support check_if_compatible_data,
so this always fails for blackhole tables.

View file

@ -3178,14 +3178,14 @@ void sync_dynamic_session_variables(THD* thd, bool global_lock)
If required, will sync with global variables if the requested variable
has not yet been allocated in the current thread.
*/
static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock)
static void *intern_sys_var_ptr(THD* thd, int offset, bool global_lock)
{
DBUG_ENTER("intern_sys_var_ptr");
DBUG_ASSERT(offset >= 0);
DBUG_ASSERT((uint)offset <= global_system_variables.dynamic_variables_head);
if (!thd)
DBUG_RETURN((uchar*) global_system_variables.dynamic_variables_ptr + offset);
DBUG_RETURN(global_system_variables.dynamic_variables_ptr + offset);
/*
dynamic_variables_head points to the largest valid offset
@ -3197,7 +3197,7 @@ static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock)
sync_dynamic_session_variables(thd, global_lock);
mysql_prlock_unlock(&LOCK_system_variables_hash);
}
DBUG_RETURN((uchar*)thd->variables.dynamic_variables_ptr + offset);
DBUG_RETURN(thd->variables.dynamic_variables_ptr + offset);
}
@ -3211,42 +3211,47 @@ static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock)
static char *mysql_sys_var_char(THD* thd, int offset)
{
return (char *) intern_sys_var_ptr(thd, offset, true);
return static_cast<char*>(intern_sys_var_ptr(thd, offset, true));
}
static int *mysql_sys_var_int(THD* thd, int offset)
{
return (int *) intern_sys_var_ptr(thd, offset, true);
return static_cast<int*>(intern_sys_var_ptr(thd, offset, true));
}
/*
  Plugin THD-local variable accessor: resolve the session-local storage
  address of an `unsigned int` plugin variable at the given offset.
  Unsigned counterpart of mysql_sys_var_int(); used as the resolve
  callback for PLUGIN_VAR_INT | PLUGIN_VAR_UNSIGNED variables.
*/
static unsigned int *mysql_sys_var_uint(THD* thd, int offset)
{
  return static_cast<unsigned int*>(intern_sys_var_ptr(thd, offset, true));
}
static long *mysql_sys_var_long(THD* thd, int offset)
{
return (long *) intern_sys_var_ptr(thd, offset, true);
return static_cast<long*>(intern_sys_var_ptr(thd, offset, true));
}
static unsigned long *mysql_sys_var_ulong(THD* thd, int offset)
{
return (unsigned long *) intern_sys_var_ptr(thd, offset, true);
return static_cast<unsigned long*>(intern_sys_var_ptr(thd, offset, true));
}
static long long *mysql_sys_var_longlong(THD* thd, int offset)
{
return (long long *) intern_sys_var_ptr(thd, offset, true);
return static_cast<long long*>(intern_sys_var_ptr(thd, offset, true));
}
static unsigned long long *mysql_sys_var_ulonglong(THD* thd, int offset)
{
return (unsigned long long *) intern_sys_var_ptr(thd, offset, true);
return static_cast<unsigned long long*>(intern_sys_var_ptr(thd, offset, true));
}
static char **mysql_sys_var_str(THD* thd, int offset)
{
return (char **) intern_sys_var_ptr(thd, offset, true);
return static_cast<char**>(intern_sys_var_ptr(thd, offset, true));
}
static double *mysql_sys_var_double(THD* thd, int offset)
{
return (double *) intern_sys_var_ptr(thd, offset, true);
return static_cast<double*>(intern_sys_var_ptr(thd, offset, true));
}
void plugin_thdvar_init(THD *thd)
@ -3527,7 +3532,7 @@ uchar* sys_var_pluginvar::real_value_ptr(THD *thd, enum_var_type type) const
if (type == OPT_GLOBAL)
thd= NULL;
return intern_sys_var_ptr(thd, *(int*) (plugin_var+1), false);
return (uchar*) intern_sys_var_ptr(thd, *(int*) (plugin_var+1), false);
}
return *(uchar**) (plugin_var+1);
}
@ -3536,8 +3541,8 @@ uchar* sys_var_pluginvar::real_value_ptr(THD *thd, enum_var_type type) const
bool sys_var_pluginvar::session_is_default(THD *thd)
{
uchar *value= plugin_var->flags & PLUGIN_VAR_THDLOCAL
? intern_sys_var_ptr(thd, *(int*) (plugin_var+1), true)
: *(uchar**) (plugin_var+1);
? static_cast<uchar*>(intern_sys_var_ptr(thd, *(int*) (plugin_var+1), true))
: *reinterpret_cast<uchar**>(plugin_var+1);
real_value_ptr(thd, OPT_SESSION);
@ -3777,27 +3782,27 @@ void plugin_opt_set_limits(struct my_option *options,
break;
case PLUGIN_VAR_ENUM | PLUGIN_VAR_THDLOCAL:
options->var_type= GET_ENUM;
options->typelib= ((thdvar_enum_t*) opt)->typelib;
options->def_value= ((thdvar_enum_t*) opt)->def_val;
options->typelib= reinterpret_cast<const thdvar_enum_t*>(opt)->typelib;
options->def_value= reinterpret_cast<const thdvar_enum_t*>(opt)->def_val;
options->min_value= options->block_size= 0;
options->max_value= options->typelib->count - 1;
break;
case PLUGIN_VAR_SET | PLUGIN_VAR_THDLOCAL:
options->var_type= GET_SET;
options->typelib= ((thdvar_set_t*) opt)->typelib;
options->def_value= ((thdvar_set_t*) opt)->def_val;
options->typelib= reinterpret_cast<const thdvar_set_t*>(opt)->typelib;
options->def_value= reinterpret_cast<const thdvar_set_t*>(opt)->def_val;
options->min_value= options->block_size= 0;
options->max_value= (1ULL << options->typelib->count) - 1;
break;
case PLUGIN_VAR_BOOL | PLUGIN_VAR_THDLOCAL:
options->var_type= GET_BOOL;
options->def_value= ((thdvar_bool_t*) opt)->def_val;
options->def_value= reinterpret_cast<const thdvar_bool_t*>(opt)->def_val;
options->typelib= &bool_typelib;
break;
case PLUGIN_VAR_STR | PLUGIN_VAR_THDLOCAL:
options->var_type= ((opt->flags & PLUGIN_VAR_MEMALLOC) ?
GET_STR_ALLOC : GET_STR);
options->def_value= (intptr) ((thdvar_str_t*) opt)->def_val;
options->def_value= reinterpret_cast<intptr_t>(reinterpret_cast<const thdvar_str_t*>(opt)->def_val);
break;
default:
DBUG_ASSERT(0);
@ -3836,7 +3841,7 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp,
size_t plugin_name_len= strlen(plugin_name);
size_t optnamelen;
const int max_comment_len= 255;
char *comment= (char *) alloc_root(mem_root, max_comment_len + 1);
char *comment= static_cast<char*>(alloc_root(mem_root, max_comment_len + 1));
char *optname;
int index= 0, UNINIT_VAR(offset);
@ -3848,7 +3853,7 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp,
DBUG_ENTER("construct_options");
plugin_name_ptr= (char*) alloc_root(mem_root, plugin_name_len + 1);
plugin_name_ptr= static_cast<char*>(alloc_root(mem_root, plugin_name_len + 1));
safe_strcpy(plugin_name_ptr, plugin_name_len + 1, plugin_name);
my_casedn_str_latin1(plugin_name_ptr); // Plugin names are pure ASCII
convert_underscore_to_dash(plugin_name_ptr, plugin_name_len);
@ -3907,19 +3912,28 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp,
continue;
if (!(register_var(plugin_name_ptr, opt->name, opt->flags)))
continue;
switch (opt->flags & PLUGIN_VAR_TYPEMASK) {
switch (opt->flags & (PLUGIN_VAR_TYPEMASK | PLUGIN_VAR_UNSIGNED)) {
case PLUGIN_VAR_BOOL:
((thdvar_bool_t *) opt)->resolve= mysql_sys_var_char;
break;
case PLUGIN_VAR_INT:
((thdvar_int_t *) opt)->resolve= mysql_sys_var_int;
break;
case PLUGIN_VAR_INT | PLUGIN_VAR_UNSIGNED:
((thdvar_uint_t *) opt)->resolve= mysql_sys_var_uint;
break;
case PLUGIN_VAR_LONG:
((thdvar_long_t *) opt)->resolve= mysql_sys_var_long;
break;
case PLUGIN_VAR_LONG | PLUGIN_VAR_UNSIGNED:
((thdvar_ulong_t *) opt)->resolve= mysql_sys_var_ulong;
break;
case PLUGIN_VAR_LONGLONG:
((thdvar_longlong_t *) opt)->resolve= mysql_sys_var_longlong;
break;
case PLUGIN_VAR_LONGLONG | PLUGIN_VAR_UNSIGNED:
((thdvar_ulonglong_t *) opt)->resolve= mysql_sys_var_ulonglong;
break;
case PLUGIN_VAR_STR:
((thdvar_str_t *) opt)->resolve= mysql_sys_var_str;
break;

View file

@ -2837,8 +2837,8 @@ void mysql_sql_stmt_execute_immediate(THD *thd)
DBUG_VOID_RETURN; // out of memory
// See comments on thd->free_list in mysql_sql_stmt_execute()
Item *free_list_backup= thd->free_list;
thd->free_list= NULL;
SCOPE_VALUE(thd->free_list, (Item *) NULL);
SCOPE_EXIT([thd]() mutable { thd->free_items(); });
/*
Make sure we call Prepared_statement::execute_immediate()
with an empty THD::change_list. It can be non empty as the above
@ -2861,8 +2861,6 @@ void mysql_sql_stmt_execute_immediate(THD *thd)
Item_change_list_savepoint change_list_savepoint(thd);
(void) stmt->execute_immediate(query.str, (uint) query.length);
change_list_savepoint.rollback(thd);
thd->free_items();
thd->free_list= free_list_backup;
/*
stmt->execute_immediately() sets thd->query_string with the executed
@ -3038,7 +3036,7 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
if (lex->result)
{
lex->result->cleanup();
lex->result->reset_for_next_ps_execution();
lex->result->set_thd(thd);
}
lex->allow_sum_func.clear_all();
@ -3431,8 +3429,13 @@ void mysql_sql_stmt_execute(THD *thd)
so they don't get freed in case of re-prepare.
See MDEV-10702 Crash in SET STATEMENT FOR EXECUTE
*/
Item *free_list_backup= thd->free_list;
thd->free_list= NULL; // Hide the external (e.g. "SET STATEMENT") Items
/*
Hide and restore at scope exit the "external" (e.g. "SET STATEMENT") Item list.
It will be freed normally in THD::cleanup_after_query().
*/
SCOPE_VALUE(thd->free_list, (Item *) NULL);
// Free items created by execute_loop() at scope exit
SCOPE_EXIT([thd]() mutable { thd->free_items(); });
/*
Make sure we call Prepared_statement::execute_loop() with an empty
THD::change_list. It can be non-empty because the above
@ -3456,12 +3459,6 @@ void mysql_sql_stmt_execute(THD *thd)
(void) stmt->execute_loop(&expanded_query, FALSE, NULL, NULL);
change_list_savepoint.rollback(thd);
thd->free_items(); // Free items created by execute_loop()
/*
Now restore the "external" (e.g. "SET STATEMENT") Item list.
It will be freed normally in THD::cleanup_after_query().
*/
thd->free_list= free_list_backup;
stmt->lex->restore_set_statement_var();
DBUG_VOID_RETURN;

View file

@ -3353,6 +3353,8 @@ err:
}
else if (info->errmsg != NULL)
safe_strcpy(info->error_text, sizeof(info->error_text), info->errmsg);
else if (info->error_text[0] == 0)
safe_strcpy(info->error_text, sizeof(info->error_text), ER(info->error));
my_message(info->error, info->error_text, MYF(0));
@ -4533,7 +4535,8 @@ bool mysql_show_binlog_events(THD* thd)
my_off_t scan_pos = BIN_LOG_HEADER_SIZE;
while (scan_pos < pos)
{
ev= Log_event::read_log_event(&log, description_event,
int error;
ev= Log_event::read_log_event(&log, &error, description_event,
opt_master_verify_checksum);
scan_pos = my_b_tell(&log);
if (ev == NULL || !ev->is_valid())
@ -4608,8 +4611,9 @@ bool mysql_show_binlog_events(THD* thd)
writing about this in the server log would be confusing as it isn't
related to server operational status.
*/
int error;
for (event_count = 0;
(ev = Log_event::read_log_event(&log,
(ev = Log_event::read_log_event(&log, &error,
description_event,
(opt_master_verify_checksum ||
verify_checksum_once), false)); )
@ -4653,7 +4657,7 @@ bool mysql_show_binlog_events(THD* thd)
break;
}
if (unlikely(event_count < unit->lim.get_select_limit() && log.error))
if (unlikely(event_count < unit->lim.get_select_limit() && error))
{
errmsg = "Wrong offset or I/O error";
mysql_mutex_unlock(log_lock);

View file

@ -287,7 +287,7 @@ end:
static bool servers_load(THD *thd, TABLE_LIST *tables)
{
TABLE *table;
TABLE *table= tables[0].table;
READ_RECORD read_record_info;
bool return_val= TRUE;
DBUG_ENTER("servers_load");
@ -296,7 +296,8 @@ static bool servers_load(THD *thd, TABLE_LIST *tables)
free_root(&mem, MYF(0));
init_sql_alloc(key_memory_servers, &mem, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
if (init_read_record(&read_record_info,thd,table=tables[0].table, NULL, NULL,
table->use_all_columns();
if (init_read_record(&read_record_info,thd,table, NULL, NULL,
1,0, FALSE))
DBUG_RETURN(1);
while (!(read_record_info.read_record()))
@ -448,7 +449,6 @@ get_server_from_table_to_cache(TABLE *table)
FOREIGN_SERVER *server= (FOREIGN_SERVER *)alloc_root(&mem,
sizeof(FOREIGN_SERVER));
DBUG_ENTER("get_server_from_table_to_cache");
table->use_all_columns();
/* get each field into the server struct ptr */
ptr= get_field(&mem, table->field[0]);

View file

@ -70,8 +70,8 @@
#include "opt_trace.h"
#include "my_cpu.h"
#include "key.h"
#include "scope.h"
#include "vector_mhnsw.h"
#include "lex_symbol.h"
#include "mysql/plugin_function.h"
@ -1352,7 +1352,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
{
Protocol *protocol= thd->protocol;
char buff[2048];
String buffer(buff, sizeof(buff), system_charset_info);
String buffer(buff, sizeof(buff), &my_charset_utf8mb4_general_ci);
List<Item> field_list;
bool error= TRUE;
DBUG_ENTER("mysqld_show_create");
@ -1840,7 +1840,7 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value,
def_value->length(0);
if (has_default)
{
StringBuffer<MAX_FIELD_WIDTH> str(field->charset());
StringBuffer<MAX_FIELD_WIDTH> str(&my_charset_utf8mb4_general_ci);
if (field->default_value)
{
field->default_value->print(&str);
@ -2368,11 +2368,11 @@ int show_create_table_ex(THD *thd, TABLE_LIST *table_list, const char *force_db,
packet->append(STRING_WITH_LEN(" NULL"));
}
def_value.set(def_value_buf, sizeof(def_value_buf), system_charset_info);
def_value.set(def_value_buf, sizeof(def_value_buf), &my_charset_utf8mb4_general_ci);
if (get_field_default_value(thd, field, &def_value, 1))
{
packet->append(STRING_WITH_LEN(" DEFAULT "));
packet->append(def_value.ptr(), def_value.length(), system_charset_info);
packet->append(def_value.ptr(), def_value.length(), &my_charset_utf8mb4_general_ci);
}
if (field->vers_update_unversioned())
@ -4744,6 +4744,19 @@ static void get_table_engine_for_i_s(THD *thd, char *buf, TABLE_LIST *tl,
}
/*
Hide error for a non-existing table.
For example, this error can occur when we use a where condition
with a db name and table, but the table does not exist or
there is a view with the same name.
*/
/* True for the "object does not exist / wrong object kind" error codes. */
static bool hide_object_error(uint err)
{
  switch (err) {
  case ER_NO_SUCH_TABLE:
  case ER_WRONG_OBJECT:
  case ER_NOT_SEQUENCE:
    return true;
  default:
    return false;
  }
}
/**
Fill I_S table with data obtained by performing full-blown table open.
@ -4879,17 +4892,7 @@ fill_schema_table_by_open(THD *thd, MEM_ROOT *mem_root,
{
if (!is_show_fields_or_keys)
{
/*
Hide error for a non-existing table and skip processing.
For example, this error can occur when we use a where condition
with a db name and table, but the table does not exist or
there is a view with the same name.
Some errors, like ER_UNKNOWN_STORAGE_ENGINE, can still allow table
processing, if the information schema table supports that.
*/
run= run && thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE
&& thd->get_stmt_da()->sql_errno() != ER_WRONG_OBJECT
&& thd->get_stmt_da()->sql_errno() != ER_NOT_SEQUENCE;
run= run && !hide_object_error(thd->get_stmt_da()->sql_errno());
if (!run)
{
thd->clear_error();
@ -4989,8 +4992,8 @@ static int fill_schema_table_names(THD *thd, TABLE_LIST *tables,
else
table->field[3]->store(STRING_WITH_LEN("ERROR"), cs);
if (unlikely(thd->is_error() &&
thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE))
if (unlikely(thd->is_error()) &&
hide_object_error(thd->get_stmt_da()->sql_errno()))
{
thd->clear_error();
return 0;
@ -5229,9 +5232,7 @@ static int fill_schema_table_from_frm(THD *thd, MEM_ROOT *mem_root,
share= tdc_acquire_share(thd, &table_list, GTS_TABLE | GTS_VIEW);
if (!share)
{
if (thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE ||
thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT ||
thd->get_stmt_da()->sql_errno() == ER_NOT_SEQUENCE)
if (hide_object_error(thd->get_stmt_da()->sql_errno()))
{
res= 0;
}
@ -5277,16 +5278,25 @@ static int fill_schema_table_from_frm(THD *thd, MEM_ROOT *mem_root,
goto end_share;
}
if (!open_table_from_share(thd, share, table_name, 0,
(EXTRA_RECORD | OPEN_FRM_FILE_ONLY),
thd->open_options, &tbl, FALSE))
res= open_table_from_share(thd, share, table_name, 0,
EXTRA_RECORD | OPEN_FRM_FILE_ONLY,
thd->open_options, &tbl, FALSE);
if (res && hide_object_error(thd->get_stmt_da()->sql_errno()))
res= 0;
else
{
char buf[NAME_CHAR_LEN + 1];
if (unlikely(res))
get_table_engine_for_i_s(thd, buf, &table_list, db_name, table_name);
tbl.s= share;
table_list.table= &tbl;
table_list.view= (LEX*) share->is_view;
res= schema_table->process_table(thd, &table_list, table,
res, db_name, table_name);
closefrm(&tbl);
bool res2= schema_table->process_table(thd, &table_list, table, res,
db_name, table_name);
if (res == 0)
closefrm(&tbl);
res= res2;
}
@ -5414,7 +5424,6 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
DBUG_ENTER("get_all_tables");
LEX *lex= thd->lex;
TABLE *table= tables->table;
TABLE_LIST table_acl_check;
SELECT_LEX *lsel= tables->schema_select_lex;
ST_SCHEMA_TABLE *schema_table= tables->schema_table;
IS_table_read_plan *plan= tables->is_table_read_plan;
@ -5557,8 +5566,6 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
}
}
bzero((char*) &table_acl_check, sizeof(table_acl_check));
if (make_db_list(thd, &db_names, &plan->lookup_field_vals))
goto err;
@ -5567,9 +5574,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
LEX_CSTRING *db_name= db_names.at(i);
DBUG_ASSERT(db_name->length <= NAME_LEN);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (!(check_access(thd, SELECT_ACL, db_name->str,
&thd->col_access, NULL, 0, 1) ||
(!thd->col_access && check_grant_db(thd, db_name->str))) ||
if (!check_access(thd, SELECT_ACL, db_name->str, &thd->col_access, 0,0,1) ||
sctx->master_access & (DB_ACLS | SHOW_DB_ACL) ||
acl_get_all3(sctx, db_name->str, 0))
#endif
@ -5591,6 +5596,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (!(thd->col_access & TABLE_ACLS))
{
TABLE_LIST table_acl_check;
table_acl_check.reset();
table_acl_check.db= Lex_ident_db(*db_name);
table_acl_check.table_name= Lex_ident_table(*table_name);
table_acl_check.grant.privilege= thd->col_access;
@ -6995,8 +7002,7 @@ int store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
if (sp)
{
LEX_CSTRING tmp_string;
Sql_mode_save sql_mode_backup(thd);
thd->variables.sql_mode= sql_mode;
SCOPE_VALUE(thd->variables.sql_mode, sql_mode);
if (sph->type() == SP_TYPE_FUNCTION)
{

View file

@ -1316,7 +1316,6 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables,
char path[FN_REFLEN + 1];
LEX_CSTRING alias= null_clex_str;
LEX_CUSTRING version= {0, 0};
LEX_CSTRING partition_engine_name= null_clex_str;
StringBuffer<160> unknown_tables(system_charset_info);
DDL_LOG_STATE local_ddl_log_state;
const char *comment_start;
@ -1401,6 +1400,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables,
const LEX_CSTRING db= table->db;
const LEX_CSTRING table_name= table->table_name;
LEX_CSTRING cpath= {0,0};
LEX_CSTRING partition_engine_name= {NULL, 0};
handlerton *hton= 0;
Table_type table_type;
size_t path_length= 0;
@ -2811,6 +2811,315 @@ key_add_part_check_null(const handler *file, KEY *key_info,
}
/*
  Validate a key declared WITHOUT OVERLAPS and count it.

  Checks, in order:
  1. The user-specified key parts must not name the period start/end
     columns (those are appended automatically as the last two parts).
  2. The key must not use a HASH or LONG_HASH algorithm.
  3. No foreign key with ON UPDATE CASCADE may share a column with this
     key — NOTE(review): presumably because a cascading update could
     silently violate the overlap constraint; confirm.

  On success increments create_info->period_info.unique_keys.

  @param thd          current connection
  @param create_info  table creation info (source of the period columns)
  @param alter_info   holds the full key list (scanned for foreign keys)
  @param key          the key to validate; append_system_key_parts() must
                      already have been called on it

  @return FALSE  ok (or key is not WITHOUT OVERLAPS)
  @return TRUE   error (my_error() already reported)
*/
static
my_bool key_check_without_overlaps(THD *thd, HA_CREATE_INFO *create_info,
                                   Alter_info *alter_info,
                                   Key &key)
{
  DBUG_ENTER("key_check_without_overlaps");
  if (!key.without_overlaps)
    DBUG_RETURN(FALSE);

  // append_system_key_parts is already called, so we should check all the
  // columns except the last two.
  const auto &period_start= create_info->period_info.period.start;
  const auto &period_end= create_info->period_info.period.end;
  /*
    Two iterators over the same list: part_it_forwarded runs two elements
    ahead, so part_it stops before yielding the last two (system) parts.
  */
  List_iterator<Key_part_spec> part_it_forwarded(key.columns);
  List_iterator<Key_part_spec> part_it(key.columns);
  part_it_forwarded++;
  part_it_forwarded++;
  while (part_it_forwarded++)
  {
    Key_part_spec *key_part= part_it++;
    if (period_start.streq(key_part->field_name)
        || period_end.streq(key_part->field_name))
    {
      my_error(ER_KEY_CONTAINS_PERIOD_FIELDS, MYF(0), key.name.str,
               key_part->field_name.str);
      DBUG_RETURN(TRUE);
    }
  }
  /* Hash-based keys cannot implement the range-overlap check */
  if (key.key_create_info.algorithm == HA_KEY_ALG_HASH ||
      key.key_create_info.algorithm == HA_KEY_ALG_LONG_HASH)
  {
    my_error(ER_KEY_CANT_HAVE_WITHOUT_OVERLAPS, MYF(0), key.name.str);
    DBUG_RETURN(TRUE);
  }
  /* Reject any column shared with an ON UPDATE CASCADE foreign key */
  for (Key &key2: alter_info->key_list)
  {
    if (key2.type != Key::FOREIGN_KEY)
      continue;
    DBUG_ASSERT(&key != &key2);
    const Foreign_key &fk= (Foreign_key&)key2;
    if (fk.update_opt != FK_OPTION_CASCADE)
      continue;
    for (Key_part_spec& kp: key.columns)
    {
      for (Key_part_spec& kp2: fk.columns)
      {
        if (kp.field_name.streq(kp2.field_name))
        {
          my_error(ER_KEY_CANT_HAVE_WITHOUT_OVERLAPS, MYF(0), key.name.str);
          DBUG_RETURN(TRUE);
        }
      }
    }
  }
  create_info->period_info.unique_keys++;
  DBUG_RETURN(FALSE);
}
/*
  Validate one key part against its column definition and compute the
  key part length.

  Looks up the Create_field named by kp.field_name, runs the type
  handler's per-key-type initialization hook, then derives the effective
  key part length from the column type, an explicit prefix length (if
  any), and the engine limits. For non-critical overlong parts of
  MULTIPLE keys the length is truncated with a note; for PRIMARY keys an
  overlong part is a hard error. A UNIQUE part exceeding the engine
  limits switches the key to the long-unique-hash implementation via
  *is_hash_field_needed.

  Fix vs. previous revision: the FULLTEXT error path returned -1 from a
  my_bool function; it now returns TRUE like every other error path
  (callers only test for non-zero, so behavior is unchanged).

  @param thd                  current connection
  @param alter_info           source of the create_list to resolve columns
  @param file                 handler supplying engine capability flags
  @param key                  the key this part belongs to
  @param kp                   key part to validate; kp.length is updated
  @param max_key_length       engine limit on total key length
  @param max_key_part_length  engine limit on a single key part
  @param is_hash_field_needed in/out: set when a hash field is required

  @return FALSE  ok, kp.length holds the final key part length
  @return TRUE   error (my_error()/my_message() already reported)
*/
static
my_bool init_key_part_spec(THD *thd, Alter_info *alter_info,
                           const handler *file,
                           const Key &key, Key_part_spec &kp,
                           uint max_key_length, uint max_key_part_length,
                           bool *is_hash_field_needed)
{
  DBUG_ENTER("init_key_part_spec");
  const Lex_ident_column &field_name= kp.field_name;
  Create_field *column= NULL;
  /* No break: if duplicate field names exist the last match wins —
     NOTE(review): duplicates are presumably rejected earlier; confirm */
  for (Create_field &c: alter_info->create_list)
    if (c.field_name.streq(field_name))
      column= &c;

  /*
    Either field is not present or field visibility is > INVISIBLE_USER
  */
  if (!column || (column->invisible > INVISIBLE_USER && !kp.generated))
  {
    my_error(ER_KEY_COLUMN_DOES_NOT_EXIST, MYF(0), field_name.str);
    DBUG_RETURN(TRUE);
  }
  if (!DBUG_IF("test_invisible_index")
      && column->invisible > INVISIBLE_USER
      && !(column->flags & VERS_SYSTEM_FIELD) && !key.invisible)
  {
    my_error(ER_KEY_COLUMN_DOES_NOT_EXIST, MYF(0), column->field_name.str);
    DBUG_RETURN(TRUE);
  }

  /* Let the column's type handler validate/adjust the part per key type */
  const Type_handler *type_handler= column->type_handler();
  switch(key.type)
  {
  case Key::VECTOR:
    if (type_handler->Key_part_spec_init_vector(&kp, *column))
    {
      my_error(ER_WRONG_ARGUMENTS, MYF(0), "VECTOR INDEX");
      DBUG_RETURN(TRUE);
    }
    break;
  case Key::FULLTEXT:
    if (type_handler->Key_part_spec_init_ft(&kp, *column))
    {
      my_error(ER_BAD_FT_COLUMN, MYF(0), field_name.str);
      DBUG_RETURN(TRUE); /* was -1: normalized to TRUE for consistency */
    }
    break;
  case Key::SPATIAL:
    if (type_handler->Key_part_spec_init_spatial(&kp, *column))
      DBUG_RETURN(TRUE);
    break;
  case Key::PRIMARY:
    if (column->vcol_info)
    {
      my_error(ER_PRIMARY_KEY_BASED_ON_GENERATED_COLUMN, MYF(0));
      DBUG_RETURN(TRUE);
    }
    if (type_handler->Key_part_spec_init_primary(&kp, *column, file))
      DBUG_RETURN(TRUE);
    break;
  case Key::MULTIPLE:
    if (type_handler->Key_part_spec_init_multiple(&kp, *column, file))
      DBUG_RETURN(TRUE);
    break;
  case Key::FOREIGN_KEY:
    if (type_handler->Key_part_spec_init_foreign(&kp, *column, file))
      DBUG_RETURN(TRUE);
    break;
  case Key::UNIQUE:
    if (type_handler->Key_part_spec_init_unique(&kp, *column, file,
                                                is_hash_field_needed))
      DBUG_RETURN(TRUE);
    break;
  case Key::IGNORE_KEY:
    DBUG_ASSERT(0);
    break;
  }

  uint key_part_length= type_handler->calc_key_length(*column);

  if (kp.length)
  {
    /* Explicit prefix length given by the user */
    if (f_is_blob(column->pack_flag))
    {
      key_part_length= MY_MIN(kp.length,
                              blob_length_by_type(column->real_field_type())
                              * column->charset->mbmaxlen);
      if (key_part_length > max_key_length ||
          key_part_length > max_key_part_length)
      {
        if (key.type == Key::MULTIPLE)
        {
          key_part_length= MY_MIN(max_key_length, max_key_part_length);
          /* not a critical problem */
          push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
                              ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
                              key_part_length);
          /* Align key length to multibyte char boundary */
          key_part_length-= key_part_length % column->charset->mbmaxlen;
        }
      }
    }
    // Catch invalid use of partial keys
    else if (!f_is_geom(column->pack_flag) &&
             // is the key partial?
             kp.length != key_part_length &&
             // is prefix length bigger than field length?
             (kp.length > key_part_length ||
              // can the field have a partial key?
              !type_handler->type_can_have_key_part() ||
              // a packed field can't be used in a partial key
              f_is_packed(column->pack_flag) ||
              // does the storage engine allow prefixed search?
              ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
               // and is this a 'unique' key?
               (key.type == Key::PRIMARY || key.type == Key::UNIQUE))))
    {
      my_message(ER_WRONG_SUB_KEY, ER_THD(thd, ER_WRONG_SUB_KEY), MYF(0));
      DBUG_RETURN(TRUE);
    }
    else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
      key_part_length= kp.length;
  }
  else if (key_part_length == 0 && (column->flags & NOT_NULL_FLAG) &&
           !*is_hash_field_needed)
  {
    my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(), field_name.str);
    DBUG_RETURN(TRUE);
  }
  if (key_part_length > max_key_part_length)
  {
    if (key.type == Key::MULTIPLE)
    {
      key_part_length= max_key_part_length;
      /* not a critical problem */
      push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
                          ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
                          key_part_length);
      /* Align key length to multibyte char boundary */
      key_part_length-= key_part_length % column->charset->mbmaxlen;
    }
    else if (key.type == Key::PRIMARY)
    {
      key_part_length= MY_MIN(max_key_length, max_key_part_length);
      my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
      DBUG_RETURN(TRUE);
    }
  }

  /* A UNIQUE part over the engine limits falls back to the hash field */
  if (key.type == Key::UNIQUE && key_part_length > MY_MIN(max_key_length,
                                                          max_key_part_length))
    *is_hash_field_needed= true;

  /* We can not store key_part_length more than 2^16 - 1 in frm. */
  if (*is_hash_field_needed && kp.length > UINT_MAX16)
  {
    my_error(ER_TOO_LONG_KEYPART, MYF(0), UINT_MAX16);
    DBUG_RETURN(TRUE);
  }
  kp.length= key_part_length;
  DBUG_RETURN(FALSE);
}
/**
  @brief Initialize the key length and algorithm (if long hash).

  This function does:
  1. Append system key parts (versioning, periods)
  2. Call Type_handler key_part initialization function.
  3. Determine the length of each key_part.
  4. Calculate the total Key length.
  5. Determine if the key is long unique based on its length
     and result from type handler. It'll be saved in
     key_create_info.algorithm as HA_KEY_ALG_LONG_HASH.

  Foreign keys are skipped entirely; their parts are not initialized here.

  @param thd          current connection
  @param alter_info   key list and create list being prepared
  @param create_info  table creation info (versioning/period columns)
  @param file         handler supplying engine key-length limits

  @return FALSE OK
  @return TRUE  error
*/

static
my_bool init_key_info(THD *thd, Alter_info *alter_info,
                      HA_CREATE_INFO *create_info,
                      const handler *file)
{
  DBUG_ENTER("init_key_info");
  uint max_key_length= file->max_key_length();
  uint max_key_part_length= file->max_key_part_length();

  for (Key &key: alter_info->key_list)
  {
    if (key.type == Key::FOREIGN_KEY)
      continue;

    int parts_added= append_system_key_parts(thd, create_info, &key);
    if (parts_added < 0)
      DBUG_RETURN(true);

    bool is_hash_field_needed= false;
    for (Key_part_spec &kp: key.columns)
    {
      if (init_key_part_spec(thd, alter_info, file, key, kp,
                             max_key_length, max_key_part_length,
                             &is_hash_field_needed))
        DBUG_RETURN(TRUE);

      /* Accumulate total key length as each part is finalized */
      key.length+= kp.length;
      if (key.length > max_key_length)
      {
        if (key.type == Key::UNIQUE)
          is_hash_field_needed= true; // for case "a BLOB UNIQUE"
        else if (key.type <= Key::MULTIPLE)
        {
          my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
          DBUG_RETURN(TRUE);
        }
      }

      KEY_CREATE_INFO *key_cinfo= &key.key_create_info;

      if (is_hash_field_needed)
      {
        /* Default to long hash; any explicitly requested non-hash
           algorithm cannot hold a key this long */
        if (key_cinfo->algorithm == HA_KEY_ALG_UNDEF)
          key_cinfo->algorithm= HA_KEY_ALG_LONG_HASH;
        if (key_cinfo->algorithm != HA_KEY_ALG_HASH &&
            key_cinfo->algorithm != HA_KEY_ALG_LONG_HASH)
        {
          my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
          DBUG_RETURN(TRUE);
        }
      }
    }
  }
  DBUG_RETURN(FALSE);
}
/*
Prepare for a table creation.
Stage 1: prepare the field list.
@ -2912,7 +3221,7 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
{
Lex_ident_column key_name;
Create_field *sql_field,*dup_field;
uint field,null_fields,max_key_length;
uint field,null_fields;
ulong record_offset= 0;
KEY_PART_INFO *key_part_info;
int field_no,dup_no;
@ -2923,7 +3232,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
int select_field_count= C_CREATE_SELECT(create_table_mode);
bool tmp_table= create_table_mode == C_ALTER_TABLE;
const bool create_simple= thd->lex->create_simple();
bool is_hash_field_needed= false;
const CHARSET_INFO *scs= system_charset_info;
DBUG_ENTER("mysql_prepare_create_table");
@ -2941,7 +3249,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
create_info->versioned());
null_fields= 0;
create_info->varchar= 0;
max_key_length= file->max_key_length();
/* Handle creation of sequences */
if (create_info->sequence)
@ -3110,6 +3417,9 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
Key *key, *key2;
uint tmp, key_number;
if (init_key_info(thd, alter_info, create_info, file))
DBUG_RETURN(TRUE);
/* Calculate number of key segements */
*key_count= 0;
@ -3247,10 +3557,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
key_info->name= key_name;
key->name= key_info->name;
int parts_added= append_system_key_parts(thd, create_info, key);
if (parts_added < 0)
DBUG_RETURN(true);
key_parts += parts_added;
key_info++;
}
tmp=file->max_keys();
@ -3269,13 +3575,13 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
key_number=0;
for (; (key=key_iterator++) ; key_number++)
{
uint key_length=0;
Create_field *auto_increment_key= 0;
Key_part_spec *column;
st_plugin_int *index_plugin= hton2plugin[create_info->db_type->slot];
ha_create_table_option *index_options= create_info->db_type->index_options;
is_hash_field_needed= false;
bool is_hash_field_needed= key->key_create_info.algorithm
== HA_KEY_ALG_LONG_HASH;
if (key->type == Key::IGNORE_KEY)
{
/* ignore redundant keys */
@ -3286,6 +3592,9 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
break;
}
if (key_check_without_overlaps(thd, create_info, alter_info, *key))
DBUG_RETURN(true);
switch (key->type) {
case Key::MULTIPLE:
key_info->flags= 0;
@ -3329,10 +3638,12 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
if (key->generated)
key_info->flags|= HA_GENERATED_KEY;
key_info->key_length= key->length;
key_info->user_defined_key_parts=(uint8) key->columns.elements;
key_info->key_part=key_part_info;
key_info->usable_key_parts= key_number;
key_info->algorithm= key->key_create_info.algorithm;
key_info->without_overlaps= key->without_overlaps;
key_info->option_list= key->option_list;
extend_option_list(thd, index_plugin, !key->old, &key_info->option_list,
index_options);
@ -3397,37 +3708,11 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
CHARSET_INFO *ft_key_charset=0; // for FULLTEXT
for (uint column_nr=0 ; (column=cols++) ; column_nr++)
{
Key_part_spec *dup_column;
it.rewind();
field=0;
while ((sql_field=it++) &&
!column->field_name.streq(sql_field->field_name))
field++;
/*
Either field is not present or field visibility is > INVISIBLE_USER
*/
if (!sql_field || (sql_field->invisible > INVISIBLE_USER &&
!column->generated))
{
my_error(ER_KEY_COLUMN_DOES_NOT_EXIST, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
if (sql_field->invisible > INVISIBLE_USER &&
!(sql_field->flags & VERS_SYSTEM_FIELD) &&
!key->invisible && !DBUG_IF("test_invisible_index"))
{
my_error(ER_KEY_COLUMN_DOES_NOT_EXIST, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
while ((dup_column= cols2++) != column)
{
if (column->field_name.streq(dup_column->field_name))
{
my_error(ER_DUP_FIELDNAME, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
}
if (sql_field->compression_method())
{
@ -3437,15 +3722,9 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
}
cols2.rewind();
const Type_handler *field_type= sql_field->type_handler();
switch(key->type) {
switch(key->type)
{
case Key::VECTOR:
if (field_type->Key_part_spec_init_vector(column, *sql_field))
{
my_error(ER_WRONG_ARGUMENTS, MYF(0), "VECTOR INDEX");
DBUG_RETURN(TRUE);
}
if (sql_field->check_vcol_for_key(thd))
DBUG_RETURN(TRUE);
if (!(sql_field->flags & NOT_NULL_FLAG))
@ -3460,10 +3739,8 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
DBUG_RETURN(TRUE);
}
break;
case Key::FULLTEXT:
if (field_type->Key_part_spec_init_ft(column, *sql_field) ||
(ft_key_charset && sql_field->charset != ft_key_charset))
if (ft_key_charset && sql_field->charset != ft_key_charset)
{
my_error(ER_BAD_FT_COLUMN, MYF(0), column->field_name.str);
DBUG_RETURN(-1);
@ -3471,57 +3748,48 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
ft_key_charset= sql_field->charset;
break;
case Key::SPATIAL:
if (field_type->Key_part_spec_init_spatial(column, *sql_field) ||
sql_field->check_vcol_for_key(thd))
DBUG_RETURN(TRUE);
if (!(sql_field->flags & NOT_NULL_FLAG))
{
my_error(ER_INDEX_CANNOT_HAVE_NULL, MYF(0), "SPATIAL");
DBUG_RETURN(TRUE);
}
break;
case Key::PRIMARY:
if (sql_field->vcol_info)
{
my_error(ER_PRIMARY_KEY_BASED_ON_GENERATED_COLUMN, MYF(0));
DBUG_RETURN(TRUE);
}
if (field_type->Key_part_spec_init_primary(column, *sql_field, file))
DBUG_RETURN(TRUE);
if (!(sql_field->flags & NOT_NULL_FLAG))
{
/* Implicitly set primary key fields to NOT NULL for ISO conf. */
/* Implicitly set primary key fields to NOT NULL for ISO conformance. */
sql_field->flags|= NOT_NULL_FLAG;
sql_field->pack_flag&= ~FIELDFLAG_MAYBE_NULL;
null_fields--;
}
break;
case Key::MULTIPLE:
if (field_type->Key_part_spec_init_multiple(column, *sql_field, file) ||
sql_field->check_vcol_for_key(thd) ||
key_add_part_check_null(file, key_info, sql_field, column))
DBUG_RETURN(TRUE);
break;
case Key::FOREIGN_KEY:
if (field_type->Key_part_spec_init_foreign(column, *sql_field, file) ||
sql_field->check_vcol_for_key(thd) ||
key_add_part_check_null(file, key_info, sql_field, column))
DBUG_RETURN(TRUE);
break;
case Key::UNIQUE:
if (field_type->Key_part_spec_init_unique(column, *sql_field, file,
&is_hash_field_needed) ||
sql_field->check_vcol_for_key(thd) ||
key_add_part_check_null(file, key_info, sql_field, column))
case Key::MULTIPLE:
case Key::FOREIGN_KEY:
if (key_add_part_check_null(file, key_info, sql_field, column))
DBUG_RETURN(TRUE);
if (sql_field->check_vcol_for_key(thd))
DBUG_RETURN(TRUE);
break;
case Key::IGNORE_KEY:
break;
case Key::SPATIAL:
if (!(sql_field->flags & NOT_NULL_FLAG))
{
my_error(ER_INDEX_CANNOT_HAVE_NULL, MYF(0), "SPATIAL");
DBUG_RETURN(TRUE);
}
if (sql_field->check_vcol_for_key(thd))
DBUG_RETURN(TRUE);
break;
}
for (const Key_part_spec &kp2: key->columns)
{
if (column == &kp2)
break;
if (kp2.field_name.streq(column->field_name))
{
my_error(ER_DUP_FIELDNAME, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
}
if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER)
@ -3537,109 +3805,21 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
key_part_info->offset= (uint16) sql_field->offset;
key_part_info->key_type=sql_field->pack_flag;
key_part_info->key_part_flag= column->asc ? 0 : HA_REVERSE_SORT;
uint key_part_length= field_type->calc_key_length(*sql_field);
if (column->length)
{
if (f_is_blob(sql_field->pack_flag))
{
key_part_length= MY_MIN(column->length,
blob_length_by_type(sql_field->real_field_type())
* sql_field->charset->mbmaxlen);
if (key_part_length > max_key_length ||
key_part_length > file->max_key_part_length())
{
if (key->type == Key::MULTIPLE)
{
key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
/* not a critical problem */
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
}
}
}
// Catch invalid use of partial keys
else if (!f_is_geom(sql_field->pack_flag) &&
// is the key partial?
column->length != key_part_length &&
// is prefix length bigger than field length?
(column->length > key_part_length ||
// can the field have a partial key?
!field_type->type_can_have_key_part() ||
// a packed field can't be used in a partial key
f_is_packed(sql_field->pack_flag) ||
// does the storage engine allow prefixed search?
((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
// and is this a 'unique' key?
(key_info->flags & HA_NOSAME))))
{
my_message(ER_WRONG_SUB_KEY, ER_THD(thd, ER_WRONG_SUB_KEY), MYF(0));
DBUG_RETURN(TRUE);
}
else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
key_part_length= column->length;
}
else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG) &&
!is_hash_field_needed)
{
my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(),
column->field_name.str);
DBUG_RETURN(TRUE);
}
if (key_part_length > file->max_key_part_length() &&
key->type != Key::FULLTEXT && key->type != Key::VECTOR)
{
if (key->type == Key::MULTIPLE)
{
key_part_length= file->max_key_part_length();
/* not a critical problem */
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
}
else
{
if (key->type != Key::UNIQUE)
{
key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
DBUG_RETURN(TRUE);
}
}
}
if (key->type == Key::UNIQUE
&& key_part_length > MY_MIN(max_key_length,
file->max_key_part_length()))
is_hash_field_needed= true;
/* We can not store key_part_length more then 2^16 - 1 in frm */
if (is_hash_field_needed && column->length > UINT_MAX16)
{
my_error(ER_TOO_LONG_KEYPART, MYF(0), UINT_MAX16);
DBUG_RETURN(TRUE);
}
else
key_part_info->length= (uint16) key_part_length;
key_part_info->length= column->length;
/* Use packed keys for long strings on the first column */
if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
!((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
(key_part_length >= KEY_DEFAULT_PACK_LENGTH) &&
(column->length >= KEY_DEFAULT_PACK_LENGTH) &&
!is_hash_field_needed)
{
key_info->flags|= field_type->KEY_pack_flags(column_nr);
key_info->flags|= sql_field->type_handler()->KEY_pack_flags(column_nr);
}
/* Check if the key segment is partial, set the key flag accordingly */
if (key_part_length != field_type->calc_key_length(*sql_field) &&
key_part_length != field_type->max_octet_length())
if (column->length != sql_field->type_handler()->
calc_key_length(*sql_field) &&
column->length != sql_field->type_handler()->max_octet_length())
key_info->flags|= HA_KEY_HAS_PART_KEY_SEG;
key_length+= key_part_length;
key_part_info++;
}
if (!key_info->name.str || check_column_name(key_info->name.str))
@ -3649,15 +3829,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
}
if (key->type == Key::UNIQUE && !(key_info->flags & HA_NULL_PART_KEY))
unique_key=1;
key_info->key_length=(uint16) key_length;
if (key_info->key_length > max_key_length && key->type == Key::UNIQUE)
is_hash_field_needed= true; // for case "a BLOB UNIQUE"
if (key_length > max_key_length && key->type != Key::FULLTEXT &&
key->type != Key::VECTOR && !is_hash_field_needed)
{
my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
DBUG_RETURN(TRUE);
}
/* Check long unique keys */
if (is_hash_field_needed)
@ -3668,12 +3839,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
sql_field->field_name.str, key_info->name.str);
DBUG_RETURN(TRUE);
}
if (key_info->algorithm != HA_KEY_ALG_UNDEF &&
key_info->algorithm != HA_KEY_ALG_HASH)
{
my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
DBUG_RETURN(TRUE);
}
}
if (is_hash_field_needed ||
(key_info->algorithm == HA_KEY_ALG_HASH &&
@ -3717,39 +3882,6 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info,
// Check if a duplicate index is defined.
check_duplicate_key(thd, key, key_info, &alter_info->key_list);
key_info->without_overlaps= key->without_overlaps;
if (key_info->without_overlaps)
{
if (key_info->algorithm == HA_KEY_ALG_HASH ||
key_info->algorithm == HA_KEY_ALG_LONG_HASH)
{
without_overlaps_err:
my_error(ER_KEY_CANT_HAVE_WITHOUT_OVERLAPS, MYF(0), key_info->name.str);
DBUG_RETURN(true);
}
key_iterator2.rewind();
while ((key2 = key_iterator2++))
{
if (key2->type != Key::FOREIGN_KEY)
continue;
DBUG_ASSERT(key != key2);
Foreign_key *fk= (Foreign_key*) key2;
if (fk->update_opt != FK_OPTION_CASCADE)
continue;
for (Key_part_spec& kp: key->columns)
{
for (Key_part_spec& kp2: fk->columns)
{
if (kp.field_name.streq(kp2.field_name))
{
goto without_overlaps_err;
}
}
}
}
create_info->period_info.unique_keys++;
}
key_info->is_ignored= key->key_create_info.is_ignored;
key_info++;
}
@ -4167,20 +4299,6 @@ static int append_system_key_parts(THD *thd, HA_CREATE_INFO *create_info,
my_error(ER_PERIOD_NOT_FOUND, MYF(0), key->period.str);
return -1;
}
const auto &period_start= create_info->period_info.period.start;
const auto &period_end= create_info->period_info.period.end;
List_iterator<Key_part_spec> part_it(key->columns);
while (Key_part_spec *key_part= part_it++)
{
if (period_start.streq(key_part->field_name)
|| period_end.streq(key_part->field_name))
{
my_error(ER_KEY_CONTAINS_PERIOD_FIELDS, MYF(0), key->name.str,
key_part->field_name.str);
return -1;
}
}
const auto &period= create_info->period_info.period;
key->columns.push_back(new (thd->mem_root)
Key_part_spec(&period.end, 0, true));
@ -7422,7 +7540,16 @@ bool mysql_compare_tables(TABLE *table, Alter_info *alter_info,
DBUG_RETURN(1);
/* Some very basic checks. */
if (table->s->fields != alter_info->create_list.elements ||
uint fields= table->s->fields;
/* There is no field count on fully-invisible fields, count them. */
for (Field **f_ptr= table->field; *f_ptr; f_ptr++)
{
if ((*f_ptr)->invisible >= INVISIBLE_FULL)
fields--;
}
if (fields != alter_info->create_list.elements ||
table->s->db_type() != create_info->db_type ||
table->s->tmp_table ||
(table->s->row_type != create_info->row_type))
@ -7433,6 +7560,9 @@ bool mysql_compare_tables(TABLE *table, Alter_info *alter_info,
for (Field **f_ptr= table->field; *f_ptr; f_ptr++)
{
Field *field= *f_ptr;
/* Skip hidden generated field like long hash index. */
if (field->invisible >= INVISIBLE_SYSTEM)
continue;
Create_field *tmp_new_field= tmp_new_field_it++;
/* Check that NULL behavior is the same. */
@ -7444,8 +7574,12 @@ bool mysql_compare_tables(TABLE *table, Alter_info *alter_info,
{
if (!tmp_new_field->field->vcol_info)
DBUG_RETURN(false);
if (!field->vcol_info->is_equal(tmp_new_field->field->vcol_info))
bool err;
if (!field->vcol_info->is_equivalent(thd, table->s, create_info->table->s,
tmp_new_field->field->vcol_info, err))
DBUG_RETURN(false);
if (err)
DBUG_RETURN(true);
}
/*
@ -7479,13 +7613,13 @@ bool mysql_compare_tables(TABLE *table, Alter_info *alter_info,
DBUG_RETURN(false);
/* Go through keys and check if they are compatible. */
KEY *table_key;
KEY *table_key_end= table->key_info + table->s->keys;
KEY *table_key= table->s->key_info;
KEY *table_key_end= table_key + table->s->keys;
KEY *new_key;
KEY *new_key_end= key_info_buffer + key_count;
/* Step through all keys of the first table and search matching keys. */
for (table_key= table->key_info; table_key < table_key_end; table_key++)
for (; table_key < table_key_end; table_key++)
{
/* Search a key with the same name. */
for (new_key= key_info_buffer; new_key < new_key_end; new_key++)
@ -7528,7 +7662,7 @@ bool mysql_compare_tables(TABLE *table, Alter_info *alter_info,
for (new_key= key_info_buffer; new_key < new_key_end; new_key++)
{
/* Search a key with the same name. */
for (table_key= table->key_info; table_key < table_key_end; table_key++)
for (table_key= table->s->key_info; table_key < table_key_end; table_key++)
{
if (table_key->name.streq(new_key->name))
break;
@ -9069,7 +9203,14 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
bzero((char*) &key_create_info, sizeof(key_create_info));
if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
key_info->algorithm= HA_KEY_ALG_UNDEF;
key_info->algorithm= alter_ctx->fast_alter_partition ?
HA_KEY_ALG_HASH : HA_KEY_ALG_UNDEF;
/*
For fast alter partition we set HA_KEY_ALG_HASH above to make sure it
doesn't lose the hash property.
Otherwise we let mysql_prepare_create_table() decide if the hash field
is needed depending on the (possibly changed) data types.
*/
key_create_info.algorithm= key_info->algorithm;
/*
We copy block size directly as some engines, like Area, sets this
@ -9488,6 +9629,7 @@ fk_check_column_changes(THD *thd, const TABLE *table,
*bad_column_name= NULL;
enum fk_column_change_type result= FK_COLUMN_NO_CHANGE;
bool strict_mode= thd->is_strict_mode();
while ((column= column_it++))
{
@ -9532,7 +9674,7 @@ fk_check_column_changes(THD *thd, const TABLE *table,
goto func_exit;
}
if (old_field_not_null != new_field_not_null)
if (strict_mode && old_field_not_null != new_field_not_null)
{
if (referenced && !new_field_not_null)
{
@ -10464,7 +10606,6 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
HA_CHECK_OPT check_opt;
#ifdef WITH_PARTITION_STORAGE_ENGINE
bool partition_changed= false;
bool fast_alter_partition= false;
#endif
bool require_copy_algorithm;
bool partial_alter= false;
@ -11043,7 +11184,7 @@ do_continue:;
Partitioning: part_info is prepared and returned via thd->work_part_info
*/
if (prep_alter_part_table(thd, table, alter_info, create_info,
&partition_changed, &fast_alter_partition))
&partition_changed, &alter_ctx.fast_alter_partition))
{
DBUG_RETURN(true);
}
@ -11080,7 +11221,7 @@ do_continue:;
Note, one can run a separate "ALTER TABLE t1 FORCE;" statement
before or after the partition change ALTER statement to upgrade data types.
*/
if (IF_PARTITIONING(!fast_alter_partition, 1))
if (!alter_ctx.fast_alter_partition)
Create_field::upgrade_data_types(alter_info->create_list);
if (create_info->check_fields(thd, alter_info,
@ -11092,7 +11233,7 @@ do_continue:;
promote_first_timestamp_column(&alter_info->create_list);
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (fast_alter_partition)
if (alter_ctx.fast_alter_partition)
{
/*
ALGORITHM and LOCK clauses are generally not allowed by the
@ -12215,7 +12356,7 @@ static int online_alter_read_from_binlog(THD *thd, rpl_group_info *rgi,
do
{
const auto *descr_event= rgi->rli->relay_log.description_event_for_exec;
auto *ev= Log_event::read_log_event(log_file, descr_event, 0, 1, ~0UL);
auto *ev= Log_event::read_log_event(log_file, &error, descr_event, 0, 1, ~0UL);
error= log_file->error;
if (unlikely(!ev))
{
@ -12978,14 +13119,15 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
protocol->store_null();
else
{
DEBUG_SYNC(thd, "mysql_checksum_table_before_calculate_checksum");
int error= t->file->calculate_checksum();
DEBUG_SYNC(thd, "mysql_checksum_table_after_calculate_checksum");
if (thd->killed)
{
/*
we've been killed; let handler clean up, and remove the
partial current row from the recordset (embedded lib)
*/
t->file->ha_rnd_end();
thd->protocol->remove_last_row();
goto err;
}

View file

@ -1474,8 +1474,9 @@ bool Table_triggers_list::prepare_record_accessors(TABLE *table)
{
int null_bytes= (table->s->fields - table->s->null_fields + 7)/8;
if (!(extra_null_bitmap= (uchar*)alloc_root(&table->mem_root, null_bytes)))
if (!(extra_null_bitmap= (uchar*)alloc_root(&table->mem_root, 2*null_bytes)))
return 1;
extra_null_bitmap_init= extra_null_bitmap + null_bytes;
if (!(record0_field= (Field **)alloc_root(&table->mem_root,
(table->s->fields + 1) *
sizeof(Field*))))
@ -1500,13 +1501,17 @@ bool Table_triggers_list::prepare_record_accessors(TABLE *table)
null_ptr++, null_bit= 1;
else
null_bit*= 2;
if (f->flags & NO_DEFAULT_VALUE_FLAG)
f->set_null();
else
f->set_notnull();
}
else
*trg_fld= *fld;
}
*trg_fld= 0;
DBUG_ASSERT(null_ptr <= extra_null_bitmap + null_bytes);
bzero(extra_null_bitmap, null_bytes);
memcpy(extra_null_bitmap_init, extra_null_bitmap, null_bytes);
}
else
{

View file

@ -168,7 +168,7 @@ class Table_triggers_list: public Sql_alloc
BEFORE INSERT/UPDATE triggers.
*/
Field **record0_field;
uchar *extra_null_bitmap;
uchar *extra_null_bitmap, *extra_null_bitmap_init;
/**
Copy of TABLE::Field array with field pointers set to TABLE::record[1]
buffer instead of TABLE::record[0] (used for OLD values in on UPDATE
@ -232,8 +232,8 @@ public:
/* End of character ser context. */
Table_triggers_list(TABLE *table_arg)
:record0_field(0), extra_null_bitmap(0), record1_field(0),
trigger_table(table_arg),
:record0_field(0), extra_null_bitmap(0), extra_null_bitmap_init(0),
record1_field(0), trigger_table(table_arg),
m_has_unparseable_trigger(false), count(0)
{
bzero((char *) triggers, sizeof(triggers));
@ -308,11 +308,15 @@ public:
TABLE_LIST *table_list);
Field **nullable_fields() { return record0_field; }
void reset_extra_null_bitmap()
void clear_extra_null_bitmap()
{
size_t null_bytes= (trigger_table->s->fields -
trigger_table->s->null_fields + 7)/8;
bzero(extra_null_bitmap, null_bytes);
if (size_t null_bytes= extra_null_bitmap_init - extra_null_bitmap)
bzero(extra_null_bitmap, null_bytes);
}
/*
  Restore the extra null bitmap (used for NOT NULL fields made
  temporarily nullable for triggers) from its saved initial state.
  A zero-sized bitmap (extra_null_bitmap_init == extra_null_bitmap)
  means there is nothing to restore.
*/
void default_extra_null_bitmap()
{
  const size_t extra_bytes=
    static_cast<size_t>(extra_null_bitmap_init - extra_null_bitmap);
  if (extra_bytes != 0)
    memcpy(extra_null_bitmap, extra_null_bitmap_init, extra_bytes);
}
Trigger *find_trigger(const LEX_CSTRING *name, bool remove_from_list);

View file

@ -7943,8 +7943,13 @@ Type_handler_datetime_common::convert_item_for_comparison(
const char *msg,
Sql_condition **cond_hdl) override
{
hit++;
return *level >= Sql_condition::WARN_LEVEL_WARN;
if (sql_errno == ER_TRUNCATED_WRONG_VALUE ||
sql_errno == ER_DATETIME_FUNCTION_OVERFLOW)
{
hit++;
return *level >= Sql_condition::WARN_LEVEL_WARN;
}
return false;
}
} cnt_handler;

View file

@ -538,7 +538,7 @@ int select_unit::delete_record()
tables of JOIN - exec_tmp_table_[1 | 2].
*/
void select_unit::cleanup()
void select_unit::reset_for_next_ps_execution()
{
table->file->extra(HA_EXTRA_RESET_STATE);
table->file->ha_delete_all_rows();
@ -899,11 +899,11 @@ bool select_unit_ext::send_eof()
return (MY_TEST(error));
}
void select_union_recursive::cleanup()
void select_union_recursive::reset_for_next_ps_execution()
{
if (table)
{
select_unit::cleanup();
select_unit::reset_for_next_ps_execution();
free_tmp_table(thd, table);
}
@ -2380,8 +2380,7 @@ bool st_select_lex_unit::exec_inner()
if (uncacheable || !item || !item->assigned() || describe)
{
if (!fake_select_lex && !(with_element && with_element->is_recursive))
union_result->cleanup();
union_result->reset_for_next_ps_execution();
for (SELECT_LEX *sl= select_cursor; sl; sl= sl->next_select())
{
ha_rows records_at_start= 0;
@ -2777,7 +2776,7 @@ bool st_select_lex_unit::cleanup()
{
if (union_result)
{
((select_union_recursive *) union_result)->cleanup();
((select_union_recursive *) union_result)->reset_for_next_ps_execution();
delete union_result;
union_result= 0;
}

View file

@ -936,9 +936,9 @@ update_begin:
goto update_end;
}
if ((table->file->ha_table_flags() & HA_CAN_FORCE_BULK_UPDATE) &&
!table->prepare_triggers_for_update_stmt_or_event() &&
!thd->lex->with_rownum)
if (!table->prepare_triggers_for_update_stmt_or_event() &&
!thd->lex->with_rownum &&
table->file->ha_table_flags() & HA_CAN_FORCE_BULK_UPDATE)
will_batch= !table->file->start_bulk_update();
/*
@ -2502,6 +2502,13 @@ void multi_update::abort_result_set()
(!thd->transaction->stmt.modified_non_trans_table && !updated)))
return;
/****************************************************************************
NOTE: if you change here be aware that almost the same code is in
multi_update::send_eof().
***************************************************************************/
/* Something already updated so we have to invalidate cache */
if (updated)
query_cache_invalidate3(thd, update_tables, 1);
@ -2834,6 +2841,13 @@ bool multi_update::send_eof()
killed_status= (local_error == 0) ? NOT_KILLED : thd->killed;
THD_STAGE_INFO(thd, stage_end);
/****************************************************************************
NOTE: if you change here be aware that almost the same code is in
multi_update::abort_result_set().
***************************************************************************/
/* We must invalidate the query cache before binlog writing and
ha_autocommit_... */

View file

@ -1256,10 +1256,14 @@ static bool update_binlog_space_limit(sys_var *, THD *,
{
#ifdef HAVE_REPLICATION
/* Refresh summary of binlog sizes */
mysql_bin_log.lock_index();
binlog_space_limit= internal_binlog_space_limit;
slave_connections_needed_for_purge=
ulonglong loc_binlog_space_limit= internal_binlog_space_limit;
uint loc_slave_connections_needed_for_purge=
internal_slave_connections_needed_for_purge;
mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_bin_log.lock_index();
binlog_space_limit= loc_binlog_space_limit;
slave_connections_needed_for_purge=
loc_slave_connections_needed_for_purge;
if (opt_bin_log)
{
@ -1269,9 +1273,11 @@ static bool update_binlog_space_limit(sys_var *, THD *,
sending_new_binlog_file++;
mysql_bin_log.unlock_index();
mysql_bin_log.purge(1);
mysql_mutex_lock(&LOCK_global_system_variables);
return 0;
}
mysql_bin_log.unlock_index();
mysql_mutex_lock(&LOCK_global_system_variables);
#endif
return 0;
}
@ -1812,7 +1818,10 @@ Sys_max_binlog_stmt_cache_size(
static bool fix_max_binlog_size(sys_var *self, THD *thd, enum_var_type type)
{
mysql_bin_log.set_max_size(max_binlog_size);
ulong saved= max_binlog_size;
mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_bin_log.set_max_size(saved);
mysql_mutex_lock(&LOCK_global_system_variables);
return false;
}
static Sys_var_on_access_global<Sys_var_ulong,
@ -1951,7 +1960,7 @@ Sys_pseudo_thread_id(
"pseudo_thread_id",
"This variable is for internal server use",
SESSION_ONLY(pseudo_thread_id),
NO_CMD_LINE, VALID_RANGE(0, ULONGLONG_MAX), DEFAULT(0),
NO_CMD_LINE, VALID_RANGE(0, MY_THREAD_ID_MAX), DEFAULT(0),
BLOCK_SIZE(1), NO_MUTEX_GUARD, IN_BINLOG);
static bool

View file

@ -3822,6 +3822,27 @@ bool Virtual_column_info::cleanup_session_expr()
}
/*
  Compare this virtual column with one belonging to another table.

  The other column's expression is cloned and its table references are
  renamed from vcol_share's identity to share's identity, so that the
  two expressions can be compared as if they belonged to the same
  table.

  @param[out] error  true if cloning the expression failed (OOM);
                     the return value is then meaningless
  @return true if both columns have the same type handler, the same
          stored/virtual property and equal (post-rename) expressions
*/
bool
Virtual_column_info::is_equivalent(THD *thd, TABLE_SHARE *share, TABLE_SHARE *vcol_share,
                                   const Virtual_column_info* vcol, bool &error) const
{
  error= true;
  Item *other_expr= vcol->expr->build_clone(thd);
  if (!other_expr)
    return false;                         // clone failed, error stays set

  /* Rewrite table references in the clone to match our own table. */
  Item::func_processor_rename_table rename_info;
  rename_info.old_db=    Lex_ident_db(vcol_share->db);
  rename_info.old_table= Lex_ident_table(vcol_share->table_name);
  rename_info.new_db=    Lex_ident_db(share->db);
  rename_info.new_table= Lex_ident_table(share->table_name);
  other_expr->walk(&Item::rename_table_processor, 1, &rename_info);

  error= false;
  const bool same_type=   type_handler() == vcol->type_handler();
  const bool same_stored= is_stored() == vcol->is_stored();
  return same_type && same_stored && expr->eq(other_expr, true);
}
class Vcol_expr_context
{
@ -4259,6 +4280,24 @@ bool copy_keys_from_share(TABLE *outparam, MEM_ROOT *root)
return 0;
}
void TABLE::update_keypart_vcol_info()
{
for (uint k= 0; k < s->keys; k++)
{
KEY &info_k= key_info[k];
uint parts = (s->use_ext_keys ? info_k.ext_key_parts :
info_k.user_defined_key_parts);
for (uint p= 0; p < parts; p++)
{
KEY_PART_INFO &kp= info_k.key_part[p];
if (kp.field != field[kp.fieldnr - 1])
{
kp.field->vcol_info = field[kp.fieldnr - 1]->vcol_info;
}
}
}
}
/*
Open a table based on a TABLE_SHARE
@ -4491,20 +4530,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
/* Update to use trigger fields */
switch_defaults_to_nullable_trigger_fields(outparam);
for (uint k= 0; k < share->keys; k++)
{
KEY *key_info= &outparam->key_info[k];
uint parts= (share->use_ext_keys ? key_info->ext_key_parts :
key_info->user_defined_key_parts);
for (uint p=0; p < parts; p++)
{
KEY_PART_INFO *kp= &key_info->key_part[p];
if (kp->field != outparam->field[kp->fieldnr - 1])
{
kp->field->vcol_info= outparam->field[kp->fieldnr - 1]->vcol_info;
}
}
}
outparam->update_keypart_vcol_info();
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
@ -7376,6 +7402,7 @@ void Field_iterator_natural_join::next()
{
cur_column_ref= column_ref_it++;
DBUG_ASSERT(!cur_column_ref || ! cur_column_ref->table_field ||
!cur_column_ref->table_field->field ||
cur_column_ref->table_ref->table ==
cur_column_ref->table_field->field->table);
}
@ -9648,8 +9675,8 @@ void TABLE::prepare_triggers_for_insert_stmt_or_event()
{
if (triggers)
{
if (triggers->has_triggers(TRG_EVENT_DELETE,
TRG_ACTION_AFTER))
triggers->clear_extra_null_bitmap();
if (triggers->has_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER))
{
/*
The table has AFTER DELETE triggers that might access to
@ -9658,8 +9685,7 @@ void TABLE::prepare_triggers_for_insert_stmt_or_event()
*/
(void) file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
}
if (triggers->has_triggers(TRG_EVENT_UPDATE,
TRG_ACTION_AFTER))
if (triggers->has_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER))
{
/*
The table has AFTER UPDATE triggers that might access to subject
@ -9674,17 +9700,19 @@ void TABLE::prepare_triggers_for_insert_stmt_or_event()
bool TABLE::prepare_triggers_for_delete_stmt_or_event()
{
if (triggers &&
triggers->has_triggers(TRG_EVENT_DELETE,
TRG_ACTION_AFTER))
if (triggers)
{
/*
The table has AFTER DELETE triggers that might access to subject table
and therefore might need delete to be done immediately. So we turn-off
the batching.
*/
(void) file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
return TRUE;
triggers->clear_extra_null_bitmap();
if (triggers->has_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER))
{
/*
The table has AFTER DELETE triggers that might access to subject table
and therefore might need delete to be done immediately. So we turn-off
the batching.
*/
(void) file->extra(HA_EXTRA_DELETE_CANNOT_BATCH);
return TRUE;
}
}
return FALSE;
}
@ -9692,17 +9720,19 @@ bool TABLE::prepare_triggers_for_delete_stmt_or_event()
bool TABLE::prepare_triggers_for_update_stmt_or_event()
{
if (triggers &&
triggers->has_triggers(TRG_EVENT_UPDATE,
TRG_ACTION_AFTER))
if (triggers)
{
/*
The table has AFTER UPDATE triggers that might access to subject
table and therefore might need update to be done immediately.
So we turn-off the batching.
*/
(void) file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
return TRUE;
triggers->clear_extra_null_bitmap();
if (triggers->has_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER))
{
/*
The table has AFTER UPDATE triggers that might access to subject
table and therefore might need update to be done immediately.
So we turn-off the batching.
*/
(void) file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
return TRUE;
}
}
return FALSE;
}

View file

@ -1769,6 +1769,7 @@ public:
bool is_filled_at_execution();
bool update_const_key_parts(COND *conds);
void update_keypart_vcol_info();
inline void initialize_opt_range_structures();

View file

@ -115,11 +115,15 @@ bool trans_begin(THD *thd, uint flags)
if (thd->in_multi_stmt_transaction_mode() ||
(thd->variables.option_bits & OPTION_TABLE_LOCK))
{
bool was_in_trans= thd->server_status &
(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
thd->variables.option_bits&= ~OPTION_TABLE_LOCK;
thd->server_status&=
~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
res= MY_TEST(ha_commit_trans(thd, TRUE));
if (was_in_trans)
trans_reset_one_shot_chistics(thd);
#ifdef WITH_WSREP
if (wsrep_thd_is_local(thd))
{

View file

@ -22,6 +22,7 @@
#include "wsrep_xid.h"
#include "wsrep_thd.h"
#include "wsrep_trans_observer.h"
#include "wsrep_schema.h" // wsrep_schema
#include "slave.h" // opt_log_slave_updates
#include "debug_sync.h"
@ -180,6 +181,10 @@ int wsrep_apply_events(THD* thd,
{
thd->variables.gtid_seq_no= gtid_ev->seq_no;
}
if (wsrep_gtid_mode)
wsrep_schema->store_gtid_event(thd, gtid_ev);
delete ev;
}
continue;

View file

@ -1725,6 +1725,42 @@ static void wsrep_keys_free(wsrep_key_arr_t* key_arr)
key_arr->keys_len= 0;
}
/*
  Internal error handler that counts ER_UNKNOWN_STORAGE_ENGINE
  conditions separately from any other error-level condition, without
  suppressing either (handle_condition() always returns FALSE, so
  conditions are still processed normally).

  safely_trapped_errors() reports whether only the "unknown storage
  engine" error was seen — i.e. the failure can be ignored safely.
*/
class Unknown_storage_engine_handler : public Internal_error_handler
{
public:
  Unknown_storage_engine_handler() = default;

  bool handle_condition(THD *thd,
                        uint sql_errno,
                        const char* sqlstate,
                        Sql_condition::enum_warning_level *level,
                        const char* msg,
                        Sql_condition ** cond_hdl) override
  {
    *cond_hdl= NULL;
    if (sql_errno == ER_UNKNOWN_STORAGE_ENGINE)
      ++m_handled_errors;
    else if (*level == Sql_condition::WARN_LEVEL_ERROR)
      ++m_unhandled_errors;
    return FALSE;                  // never swallow the condition here
  }

  bool safely_trapped_errors()
  {
    /* True only when the unknown-engine error occurred and nothing else. */
    return m_handled_errors > 0 && m_unhandled_errors == 0;
  }

private:
  int m_handled_errors= 0;
  int m_unhandled_errors= 0;
};
/*!
* @param thd thread
* @param tables list of tables
@ -1732,99 +1768,116 @@ static void wsrep_keys_free(wsrep_key_arr_t* key_arr)
* @return 0 if parent table append was successful, non-zero otherwise.
*/
bool
wsrep_append_fk_parent_table(THD* thd, TABLE_LIST* tables, wsrep::key_array* keys)
bool wsrep_append_fk_parent_table(THD *thd, TABLE_LIST *tables,
wsrep::key_array *keys)
{
bool fail= false;
TABLE_LIST *table;
TABLE_LIST *table_last_in_list;
assert(wsrep_thd_is_local(thd));
for (table= tables; table; table= table->next_local)
bool fail= false;
Open_table_context ot_ctx(thd, MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL);
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
if (!table->table)
{
if (is_temporary_table(table))
TABLE_LIST *save_next_global= table->next_global;
TABLE_LIST::enum_open_strategy save_open_strategy= table->open_strategy;
table->open_strategy= TABLE_LIST::OPEN_IF_EXISTS;
Unknown_storage_engine_handler no_storage_engine;
thd->push_internal_handler(&no_storage_engine);
if (open_table(thd, table, &ot_ctx))
{
WSREP_DEBUG("Temporary table %s.%s already opened query=%s", table->db.str,
table->table_name.str, wsrep_thd_query(thd));
return false;
}
}
thd->release_transactional_locks();
uint counter;
MDL_savepoint mdl_savepoint= thd->mdl_context.mdl_savepoint();
for (table_last_in_list= tables;;table_last_in_list= table_last_in_list->next_local) {
if (!table_last_in_list->next_local) {
break;
}
}
if (open_tables(thd, &tables, &counter, MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL))
{
WSREP_DEBUG("Unable to open table for FK checks for %s", wsrep_thd_query(thd));
fail= true;
goto exit;
}
for (table= tables; table; table= table->next_local)
{
if (!is_temporary_table(table) && table->table)
{
FOREIGN_KEY_INFO *f_key_info;
List<FOREIGN_KEY_INFO> f_key_list;
table->table->file->get_foreign_key_list(thd, &f_key_list);
List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list);
while ((f_key_info=it++))
if (no_storage_engine.safely_trapped_errors())
{
WSREP_DEBUG("appended fkey %s", f_key_info->referenced_table->str);
keys->push_back(wsrep_prepare_key_for_toi(f_key_info->referenced_db->str,
f_key_info->referenced_table->str,
wsrep::key::shared));
Diagnostics_area *da= thd->get_stmt_da();
da->reset_diagnostics_area();
da->clear_warning_info(thd->query_id);
}
else
{
fail= true;
}
}
thd->pop_internal_handler();
table->next_global= save_next_global;
table->open_strategy= save_open_strategy;
if (fail)
{
WSREP_DEBUG("Unable to open table for FK checks for %s",
wsrep_thd_query(thd));
goto exit;
}
}
if (table->table && !is_temporary_table(table))
{
FOREIGN_KEY_INFO *f_key_info;
List<FOREIGN_KEY_INFO> f_key_list;
table->table->file->get_foreign_key_list(thd, &f_key_list);
List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list);
while ((f_key_info= it++))
{
WSREP_DEBUG("appended fkey %s", f_key_info->referenced_table->str);
keys->push_back(wsrep_prepare_key_for_toi(
f_key_info->referenced_db->str, f_key_info->referenced_table->str,
wsrep::key::shared));
}
}
}
exit:
DEBUG_SYNC(thd, "wsrep_append_fk_toi_keys_before_close_tables");
DEBUG_SYNC(thd, "wsrep_append_fk_toi_keys_before_close_tables");
/* close the table and release MDL locks */
close_thread_tables(thd);
thd->mdl_context.rollback_to_savepoint(mdl_savepoint);
bool invalidate_next_global= false;
for (table= tables; table; table= table->next_local)
/* close the table and release MDL locks */
close_thread_tables(thd);
thd->mdl_context.rollback_to_savepoint(ot_ctx.start_of_statement_svp());
for (TABLE_LIST *table= tables; table; table= table->next_global)
{
table->table= NULL;
table->mdl_request.ticket= NULL;
}
/*
Reopen temporary tables if necessary.
DROP TABLE pre-opens temporary tables, but the corresponding
command does not have the CF_PREOPEN_TMP_TABLES flag set.
*/
const bool preopen_tmp_tables=
thd->lex->sql_command == SQLCOM_DROP_TABLE ||
(sql_command_flags[thd->lex->sql_command] & CF_PREOPEN_TMP_TABLES);
if (preopen_tmp_tables && thd->open_temporary_tables(tables))
{
WSREP_INFO("Unable to reopen temporary tables after FK checks");
fail= true;
}
/*
MDEV-32938: Check if DDL operation has been killed before.
It may be that during collecting foreign keys this operation gets
BF-aborted by another already-running TOI operation because it got MDL
locks on the same table for checking foreign keys. After
`close_thread_tables()` has been called it's safe to assume that no-one can
BF-abort this operation as it's not holding any MDL locks any more.
*/
if (!fail)
{
mysql_mutex_lock(&thd->LOCK_thd_kill);
if (thd->killed)
{
table->table= NULL;
table->mdl_request.ticket= NULL;
// We should invalidate `next_global` only for entries that are added
// in this function
if (table == table_last_in_list) {
invalidate_next_global= true;
}
if (invalidate_next_global) {
table->next_global= NULL;
}
fail= true;
}
mysql_mutex_unlock(&thd->LOCK_thd_kill);
}
/*
MDEV-32938: Check if DDL operation has been killed before.
It may be that during collecting foreign keys this operation gets BF-aborted
by another already-running TOI operation because it got MDL locks on the same
table for checking foreign keys.
After `close_thread_tables()` has been called it's safe to assume that no-one
can BF-abort this operation as it's not holding any MDL locks any more.
*/
if (!fail)
{
mysql_mutex_lock(&thd->LOCK_thd_kill);
if (thd->killed)
{
fail= true;
}
mysql_mutex_unlock(&thd->LOCK_thd_kill);
}
return fail;
return fail;
}
bool wsrep_reload_ssl()
@ -2234,11 +2287,18 @@ int wsrep_to_buf_helper(
domain_id= wsrep_gtid_server.domain_id;
server_id= wsrep_gtid_server.server_id;
}
Gtid_log_event gtid_event(thd, seqno, domain_id, true,
LOG_EVENT_SUPPRESS_USE_F, true, 0);
gtid_event.server_id= server_id;
if (!gtid_event.is_valid()) ret= 0;
ret= writer.write(&gtid_event);
/*
* Ignore if both thd->variables.gtid_seq_no and
* thd->variables.wsrep_gtid_seq_no are not set.
*/
if (seqno)
{
Gtid_log_event gtid_event(thd, seqno, domain_id, true,
LOG_EVENT_SUPPRESS_USE_F, true, 0);
gtid_event.server_id= server_id;
if (!gtid_event.is_valid()) ret= 0;
ret= writer.write(&gtid_event);
}
}
/*
It's local DDL so in case of possible gtid seqno (SET gtid_seq_no=X)
@ -2845,8 +2905,11 @@ static int wsrep_TOI_begin(THD *thd, const char *db, const char *table,
if (!thd->is_error())
{
my_error(ER_LOCK_DEADLOCK, MYF(0), "WSREP replication failed. Check "
"your wsrep connection state and retry the query.");
push_warning_printf(thd, Sql_state_errno_level::WARN_LEVEL_ERROR,
ER_LOCK_DEADLOCK,
"WSREP replication failed. Check "
"your wsrep connection state and retry the query.");
my_error(ER_LOCK_DEADLOCK, MYF(0));
}
}
rc= -1;
@ -3352,8 +3415,6 @@ static inline bool is_committing_connection(THD *thd)
static my_bool have_client_connections(THD *thd, void*)
{
DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
(longlong) thd->thread_id));
if (is_client_connection(thd))
{
if (thd->killed == KILL_CONNECTION ||
@ -3405,13 +3466,18 @@ static my_bool kill_all_threads(THD *thd, THD *caller_thd)
/* We skip slave threads & scheduler on this first loop through. */
if (is_client_connection(thd) && thd != caller_thd)
{
if (thd->get_stmt_da()->is_eof())
/* the connection executing SHUTDOWN, should do clean exit,
not aborting here */
if (thd->get_command() == COM_SHUTDOWN)
{
WSREP_DEBUG("leaving SHUTDOWN executing connection alive, thread: %lld",
(longlong) thd->thread_id);
return 0;
}
/* replaying connection is killed by signal */
if (is_replaying_connection(thd))
{
WSREP_DEBUG("closing connection is replaying %lld", (longlong) thd->thread_id);
thd->set_killed(KILL_CONNECTION_HARD);
return 0;
}
@ -3420,7 +3486,7 @@ static my_bool kill_all_threads(THD *thd, THD *caller_thd)
{
/* replicated transactions must be skipped */
WSREP_DEBUG("closing connection %lld", (longlong) thd->thread_id);
/* instead of wsrep_close_thread() we do now soft kill by THD::awake */
/* instead of wsrep_close_thread() we do now hard kill by THD::awake */
thd->awake(KILL_CONNECTION_HARD);
return 0;
}
@ -3461,8 +3527,10 @@ void wsrep_close_client_connections(my_bool wait_to_end, THD* except_caller_thd)
*/
server_threads.iterate(kill_remaining_threads, except_caller_thd);
DBUG_PRINT("quit", ("Waiting for threads to die (count=%u)", THD_count::value()));
WSREP_DEBUG("waiting for client connections to close: %u", THD_count::value());
DBUG_PRINT("quit", ("Waiting for threads to die (count=%u)",
THD_count::value()));
WSREP_DEBUG("waiting for client connections to close: %u",
THD_count::value());
while (wait_to_end && server_threads.iterate(have_client_connections))
{
@ -4018,3 +4086,15 @@ void wsrep_commit_empty(THD* thd, bool all)
}
DBUG_VOID_RETURN;
}
/*
  Walk a table list via the next_global chain and report whether it
  contains at least one table that is not a temporary table.

  @param thd     thread handle (unused; kept for API consistency)
  @param tables  head of the table list, may be NULL

  @return true if any non-temporary table is present, false otherwise
*/
bool wsrep_table_list_has_non_temp_tables(THD *thd, TABLE_LIST *tables)
{
  TABLE_LIST *tl= tables;
  while (tl)
  {
    if (!is_temporary_table(tl))
      return true;
    tl= tl->next_global;
  }
  return false;
}

View file

@ -593,6 +593,13 @@ wsrep::key wsrep_prepare_key_for_toi(const char* db, const char* table,
void wsrep_wait_ready(THD *thd);
void wsrep_ready_set(bool ready_value);
/**
* Returns true if the given list of tables contains at least one
* non-temporary table.
*/
bool wsrep_table_list_has_non_temp_tables(THD *thd, TABLE_LIST *tables);
#else /* !WITH_WSREP */
/* These macros are needed to compile MariaDB without WSREP support

View file

@ -1,4 +1,4 @@
/* Copyright (C) 2015-2023 Codership Oy <info@codership.com>
/* Copyright (C) 2015-2025 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -31,6 +31,8 @@
#include "wsrep_storage_service.h"
#include "wsrep_thd.h"
#include "wsrep_server_state.h"
#include "log_event.h"
#include "sql_class.h"
#include <string>
#include <sstream>
@ -1652,6 +1654,64 @@ int Wsrep_schema::recover_sr_transactions(THD *orig_thd)
DBUG_RETURN(ret);
}
/*
  Store a replicated GTID event into the mysql.gtid_slave_pos table.

  Records the event's GTID in the global slave state and, when the
  event is flagged as DDL, commits the statement/transaction since DDL
  implies an implicit commit. On any error the statement and
  transaction are rolled back.

  @param thd   THD of the calling (applier) thread.
  @param gtid  GTID event taken from the binlog stream.

  @return 0 on success, non-zero error code on failure.
*/
int Wsrep_schema::store_gtid_event(THD* thd,
                                   const Gtid_log_event *gtid)
{
  DBUG_ENTER("Wsrep_schema::store_gtid_event");
  int error=0;
  void *hton= NULL;
  /* Event flags decide whether the write joins an open transaction
     and whether an implicit (DDL) commit must follow. */
  const bool in_transaction= (gtid->flags2 & Gtid_log_event::FL_TRANSACTIONAL);
  const bool in_ddl= (gtid->flags2 & Gtid_log_event::FL_DDL);

  DBUG_PRINT("info", ("thd: %p, in_transaction: %d, in_ddl: %d "
                      "in_active_multi_stmt_transaction: %d",
                      thd, in_transaction, in_ddl,
                      thd->in_active_multi_stmt_transaction()));

  /* RAII guards: suppress wsrep replication of this internal write,
     keep it out of the binlog and bypass sql_safe_updates for the
     duration of this call (restored on scope exit). */
  Wsrep_schema_impl::wsrep_ignore_table ignore_table(thd);
  Wsrep_schema_impl::binlog_off binlog_off(thd);
  Wsrep_schema_impl::sql_safe_updates sql_safe_updates(thd);

  rpl_group_info *rgi= thd->wsrep_rgi;
  /* Allocate the next per-domain sub_id for ordering rows in
     gtid_slave_pos. */
  const uint64 sub_id= rpl_global_gtid_slave_state->next_sub_id(gtid->domain_id);

  rpl_gtid current_gtid;
  current_gtid.domain_id= gtid->domain_id;
  current_gtid.server_id= gtid->server_id;
  current_gtid.seq_no= gtid->seq_no;
  /* The GTID is being recorded here; clear the pending flag so it is
     not recorded again later. */
  rgi->gtid_pending= false;

  /* A transactional GTID event must arrive inside an open
     multi-statement transaction. */
  DBUG_ASSERT(!in_transaction || thd->in_active_multi_stmt_transaction());

  if ((error= rpl_global_gtid_slave_state->record_gtid(thd, &current_gtid,
                                                       sub_id,
                                                       in_transaction, false, &hton)))
    goto out;

  /* Make the newly recorded GTID visible in the in-memory slave state. */
  rpl_global_gtid_slave_state->update_state_hash(sub_id, &current_gtid, hton, rgi);

  if (in_ddl)
  {
    // Commit transaction if this GTID is part of DDL-clause because
    // DDL causes implicit commit assuming there is no multi statement
    // transaction ongoing.
    if((error= trans_commit_stmt(thd)))
      goto out;
    (void)trans_commit(thd);
  }
out:
  if (error)
  {
    /* Best-effort rollback; the error code itself is returned to the
       caller for reporting. */
    WSREP_DEBUG("Wsrep_schema::store_gtid_event %llu-%llu-%llu failed error=%s (%d).",
                gtid->domain_id, gtid->server_id, gtid->seq_no, strerror(error), error);
    (void)trans_rollback_stmt(thd);
    (void)trans_rollback(thd);
  }
  DBUG_RETURN(error);
}
void Wsrep_schema::clear_allowlist()
{
THD* thd= new THD(next_thread_id());

View file

@ -1,4 +1,4 @@
/* Copyright (C) 2015-2023 Codership Oy <info@codership.com>
/* Copyright (C) 2015-2024 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -22,7 +22,6 @@
#include "mysqld.h"
#include "wsrep_mysqld.h"
/*
Forward decls
*/
@ -32,6 +31,7 @@ struct TABLE;
struct TABLE_LIST;
struct st_mysql_lex_string;
typedef struct st_mysql_lex_string LEX_STRING;
class Gtid_log_event;
/** Name of the table in `wsrep_schema_str` used for storing streaming
replication data. In an InnoDB full format, e.g. "database/tablename". */
@ -133,6 +133,15 @@ class Wsrep_schema
*/
int recover_sr_transactions(THD* orig_thd);
/**
Store GTID-event to mysql.gtid_slave_pos table.
@param thd The THD object of the calling thread.
@param gtid GTID event from binlog.
@return Zero on success, non-zero on failure.
*/
int store_gtid_event(THD* thd, const Gtid_log_event *gtid);
/**
Delete all rows on bootstrap from `wsrep_allowlist` variable

View file

@ -496,7 +496,7 @@ void wsrep_backup_kill_for_commit(THD *thd)
void wsrep_restore_kill_after_commit(THD *thd)
{
DBUG_ASSERT(WSREP(thd));
DBUG_ASSERT(wsrep_is_active(thd));
mysql_mutex_assert_owner(&thd->LOCK_thd_kill);
thd->killed= thd->wsrep_abort_by_kill;
my_free(thd->killed_err);