Mirror of https://github.com/MariaDB/server.git (synced 2025-01-15 19:42:28 +01:00)
Add likely/unlikely to speed up execution
Added to:
- if (error)
- Lex
- sql_yacc.yy and sql_yacc_ora.yy
- In header files to alloc() calls
- Added thd argument to thd_net_is_killed()
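For context: likely() and unlikely() are branch-prediction hints that tell the compiler which way a test is expected to go, so the hot path stays straight-line and the cold path (usually error handling) is laid out out of the way. A minimal sketch of the usual GCC/Clang-style definition; this is illustrative only, since the server defines these macros in its own headers and the exact form there may differ:

/* Illustrative definition (an assumption), not copied from the MariaDB tree */
#if defined(__GNUC__) || defined(__clang__)
#define likely(x)   __builtin_expect(((x) != 0), 1)
#define unlikely(x) __builtin_expect(((x) != 0), 0)
#else
#define likely(x)   (x)   /* no-op fallback on other compilers */
#define unlikely(x) (x)
#endif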
parent a22a339f8e
commit 30ebc3ee9e
118 changed files with 4874 additions and 4440 deletions
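Every hunk below applies the same shape of change: checks that are expected to succeed stay on the fast path via likely(), while rarely-true conditions (I/O errors, kill flags, timeouts, overflow) are wrapped in unlikely(). A self-contained before/after sketch of that pattern; do_step() is a hypothetical stand-in for the handler, parser and file calls touched by this commit, and unlikely() is assumed to be defined as sketched above:

/* do_step() is hypothetical; it stands in for calls such as mysql_file_read() or ha_rnd_init() */
static int do_step(void) { return 0; }

int caller(void)
{
  int error;
  /* Before this commit:  if ((error= do_step()))  goto err;        */
  /* After: the error branch is explicitly hinted as the cold path  */
  if (unlikely((error= do_step())))
    goto err;
  return 0;
err:
  return error;
}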
@@ -63,7 +63,7 @@ class engine_option_value: public Sql_alloc
  name(name_arg), next(NULL), parsed(false), quoted_value(false)
  {
  char *str;
- if ((value.str= str= (char *)alloc_root(root, 22)))
+ if (likely((value.str= str= (char *)alloc_root(root, 22))))
  {
  value.length= longlong10_to_str(value_arg, str, 10) - str;
  link(start, end);

@@ -89,10 +89,10 @@ Table_type dd_frm_type(THD *thd, char *path, LEX_CSTRING *engine_name,
  ((char*) (engine_name->str))[0]= 0;
  }

- if ((error= mysql_file_read(file, (uchar*) header, sizeof(header), MYF(MY_NABP))))
+ if (unlikely((error= mysql_file_read(file, (uchar*) header, sizeof(header), MYF(MY_NABP)))))
  goto err;

- if (!strncmp((char*) header, "TYPE=VIEW\n", 10))
+ if (unlikely((!strncmp((char*) header, "TYPE=VIEW\n", 10))))
  {
  type= TABLE_TYPE_VIEW;
  goto err;
@@ -790,7 +790,7 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action)
  and shall not be reported as a result of SET DEBUG_SYNC.
  Hence, we check for the first condition above.
  */
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
  DBUG_RETURN(TRUE);
  }

@@ -1448,7 +1448,7 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action)
  DBUG_PRINT("debug_sync",
  ("awoke from %s global: %s error: %d",
  sig_wait, sig_glob, error));});
- if (error == ETIMEDOUT || error == ETIME)
+ if (unlikely(error == ETIMEDOUT || error == ETIME))
  {
  // We should not make the statement fail, even if in strict mode.
  const bool save_abort_on_warning= thd->abort_on_warning;
@@ -232,7 +232,8 @@ static File open_error_msg_file(const char *file_name, const char *language,
  ret->errors= uint2korr(head+12);
  ret->sections= uint2korr(head+14);

- if (ret->max_error < error_messages || ret->sections != MAX_ERROR_RANGES)
+ if (unlikely(ret->max_error < error_messages ||
+     ret->sections != MAX_ERROR_RANGES))
  {
  sql_print_error("\
Error message file '%s' had only %d error messages, but it should contain at least %d error messages.\nCheck that the above file is the right version for this program!",

@@ -276,8 +277,8 @@ bool read_texts(const char *file_name, const char *language,
  struct st_msg_file msg_file;
  DBUG_ENTER("read_texts");

- if ((file= open_error_msg_file(file_name, language, error_messages,
-     &msg_file)) == FERR)
+ if (unlikely((file= open_error_msg_file(file_name, language, error_messages,
+     &msg_file)) == FERR))
  DBUG_RETURN(1);

  if (!(*data= (const char***)
@@ -127,7 +127,7 @@ int writefrm(const char *path, const char *db, const char *table,
  File file= mysql_file_create(key_file_frm, file_name,
  CREATE_MODE, create_flags, MYF(0));

- if ((error= file < 0))
+ if (unlikely((error= file < 0)))
  {
  if (my_errno == ENOENT)
  my_error(ER_BAD_DB_ERROR, MYF(0), db);
@@ -1444,7 +1444,7 @@ Event_job_data::execute(THD *thd, bool drop)
  }

  end:
- if (drop && !thd->is_fatal_error)
+ if (drop && likely(!thd->is_fatal_error))
  {
  /*
  We must do it here since here we're under the right authentication
@@ -320,7 +320,7 @@ Events::create_event(THD *thd, Event_parse_data *parse_data)
  enum_binlog_format save_binlog_format;
  DBUG_ENTER("Events::create_event");

- if (check_if_system_tables_error())
+ if (unlikely(check_if_system_tables_error()))
  DBUG_RETURN(TRUE);

  /*

@@ -455,7 +455,7 @@ Events::update_event(THD *thd, Event_parse_data *parse_data,

  DBUG_ENTER("Events::update_event");

- if (check_if_system_tables_error())
+ if (unlikely(check_if_system_tables_error()))
  DBUG_RETURN(TRUE);

  if (parse_data->check_parse_data(thd) || parse_data->do_not_create)

@@ -589,7 +589,7 @@ Events::drop_event(THD *thd, const LEX_CSTRING *dbname,
  enum_binlog_format save_binlog_format;
  DBUG_ENTER("Events::drop_event");

- if (check_if_system_tables_error())
+ if (unlikely(check_if_system_tables_error()))
  DBUG_RETURN(TRUE);

  if (check_access(thd, EVENT_ACL, dbname->str, NULL, NULL, 0, 0))

@@ -761,7 +761,7 @@ Events::show_create_event(THD *thd, const LEX_CSTRING *dbname,
  DBUG_ENTER("Events::show_create_event");
  DBUG_PRINT("enter", ("name: %s@%s", dbname->str, name->str));

- if (check_if_system_tables_error())
+ if (unlikely(check_if_system_tables_error()))
  DBUG_RETURN(TRUE);

  if (check_access(thd, EVENT_ACL, dbname->str, NULL, NULL, 0, 0))

@@ -817,7 +817,7 @@ Events::fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */)
  if (opt_noacl)
  DBUG_RETURN(0);

- if (check_if_system_tables_error())
+ if (unlikely(check_if_system_tables_error()))
  DBUG_RETURN(1);

  /*
sql/field.cc (42 changes)
@@ -1343,7 +1343,7 @@ bool Field::sp_prepare_and_store_item(THD *thd, Item **value)

  expr_item->save_in_field(this, 0);

- if (!thd->is_error())
+ if (likely(!thd->is_error()))
  DBUG_RETURN(false);

  error:

@@ -1383,7 +1383,7 @@ void Field_num::prepend_zeros(String *value) const
  if ((diff= (int) (field_length - value->length())) > 0)
  {
  const bool error= value->realloc(field_length);
- if (!error)
+ if (likely(!error))
  {
  bmove_upp((uchar*) value->ptr()+field_length,
  (uchar*) value->ptr()+value->length(),

@@ -1625,7 +1625,7 @@ double Field_real::get_double(const char *str, size_t length, CHARSET_INFO *cs,
  {
  char *end;
  double nr= my_strntod(cs,(char*) str, length, &end, error);
- if (*error)
+ if (unlikely(*error))
  {
  set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
  *error= 1;

@@ -3155,7 +3155,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value,
  *native_error= my_decimal2binary(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW,
  decimal_value, ptr, precision, dec);

- if (*native_error == E_DEC_OVERFLOW)
+ if (unlikely(*native_error == E_DEC_OVERFLOW))
  {
  my_decimal buff;
  DBUG_PRINT("info", ("overflow"));

@@ -3174,7 +3174,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
  {
  int native_error;
  bool rc= store_value(decimal_value, &native_error);
- if (!rc && native_error == E_DEC_TRUNCATED)
+ if (unlikely(!rc && native_error == E_DEC_TRUNCATED))
  set_note(WARN_DATA_TRUNCATED, 1);
  return rc;
  }

@@ -4154,7 +4154,7 @@ int Field_long::store(double nr)
  else
  res=(int32) (longlong) nr;
  }
- if (error)
+ if (unlikely(error))
  set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);

  int4store(ptr,res);

@@ -4200,7 +4200,7 @@ int Field_long::store(longlong nr, bool unsigned_val)
  else
  res=(int32) nr;
  }
- if (error)
+ if (unlikely(error))
  set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);

  int4store(ptr,res);

@@ -4284,7 +4284,7 @@ int Field_longlong::store(const char *from,size_t len,CHARSET_INFO *cs)
  ulonglong tmp;

  tmp= cs->cset->strntoull10rnd(cs,from,len,unsigned_flag,&end,&error);
- if (error == MY_ERRNO_ERANGE)
+ if (unlikely(error == MY_ERRNO_ERANGE))
  {
  set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
  error= 1;

@@ -4304,7 +4304,7 @@ int Field_longlong::store(double nr)
  ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
  Converter_double_to_longlong conv(nr, unsigned_flag);

- if (conv.error())
+ if (unlikely(conv.error()))
  set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);

  int8store(ptr, conv.result());

@@ -4317,7 +4317,7 @@ int Field_longlong::store(longlong nr, bool unsigned_val)
  ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
  int error= 0;

- if (nr < 0) // Only possible error
+ if (unlikely(nr < 0)) // Only possible error
  {
  /*
  if field is unsigned and value is signed (< 0) or

@@ -4466,7 +4466,7 @@ int Field_float::store(double nr)
  int error= truncate_double(&nr, field_length,
  not_fixed ? NOT_FIXED_DEC : dec,
  unsigned_flag, FLT_MAX);
- if (error)
+ if (unlikely(error))
  {
  set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
  if (error < 0) // Wrong double value

@@ -4645,7 +4645,7 @@ int Field_double::store(double nr)
  int error= truncate_double(&nr, field_length,
  not_fixed ? NOT_FIXED_DEC : dec,
  unsigned_flag, DBL_MAX);
- if (error)
+ if (unlikely(error))
  {
  set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
  if (error < 0) // Wrong double value

@@ -4812,7 +4812,7 @@ double Field_double::val_real(void)
  longlong Field_double::val_int_from_real(bool want_unsigned_result)
  {
  Converter_double_to_longlong conv(val_real(), want_unsigned_result);
- if (!want_unsigned_result && conv.error())
+ if (unlikely(!want_unsigned_result && conv.error()))
  conv.push_warning(get_thd(), Field_double::val_real(), false);
  return conv.result();
  }
@@ -5049,7 +5049,7 @@ int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time,
  timestamp= TIME_to_timestamp(thd, l_time, &conversion_error);
  if (timestamp == 0 && l_time->second_part == 0)
  conversion_error= ER_WARN_DATA_OUT_OF_RANGE;
- if (conversion_error)
+ if (unlikely(conversion_error))
  {
  set_datetime_warning(conversion_error,
  str, MYSQL_TIMESTAMP_DATETIME, !error);

@@ -6241,7 +6241,7 @@ int Field_year::store(const char *from, size_t len,CHARSET_INFO *cs)
  if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION &&
  (error= check_int(cs, from, len, end, error)))
  {
- if (error == 1) /* empty or incorrect string */
+ if (unlikely(error == 1) /* empty or incorrect string */)
  {
  *ptr= 0;
  return 1;

@@ -6908,7 +6908,7 @@ Field_longstr::check_string_copy_error(const String_copier *copier,
  const char *pos;
  char tmp[32];

- if (!(pos= copier->most_important_error_pos()))
+ if (likely(!(pos= copier->most_important_error_pos())))
  return FALSE;

  convert_to_printable(tmp, sizeof(tmp), pos, (end - pos), cs, 6);

@@ -7020,10 +7020,10 @@ int Field_str::store(double nr)
  my_bool error= (local_char_length == 0);

  // my_gcvt() requires width > 0, and we may have a CHAR(0) column.
- if (!error)
+ if (likely(!error))
  length= my_gcvt(nr, MY_GCVT_ARG_DOUBLE, local_char_length, buff, &error);

- if (error)
+ if (unlikely(error))
  {
  if (get_thd()->abort_on_warning)
  set_warning(ER_DATA_TOO_LONG, 1);

@@ -10273,7 +10273,7 @@ bool check_expression(Virtual_column_info *vcol, LEX_CSTRING *name,
  if (type == VCOL_GENERATED_VIRTUAL)
  filter|= VCOL_NOT_VIRTUAL;

- if (ret || (res.errors & filter))
+ if (unlikely(ret || (res.errors & filter)))
  {
  my_error(ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name,
  vcol_type_name(type), name->str);

@@ -11255,8 +11255,8 @@ bool Field::save_in_field_default_value(bool view_error_processing)
  {
  THD *thd= table->in_use;

- if (flags & NO_DEFAULT_VALUE_FLAG &&
-     real_type() != MYSQL_TYPE_ENUM)
+ if (unlikely(flags & NO_DEFAULT_VALUE_FLAG &&
+     real_type() != MYSQL_TYPE_ENUM))
  {
  if (reset())
  {
@@ -487,10 +487,11 @@ static void do_cut_string_complex(Copy_field *copy)
  memcpy(copy->to_ptr, copy->from_ptr, copy_length);

  /* Check if we lost any important characters */
- if (prefix.well_formed_error_pos() ||
-     cs->cset->scan(cs, (char*) copy->from_ptr + copy_length,
-     (char*) from_end,
-     MY_SEQ_SPACES) < (copy->from_length - copy_length))
+ if (unlikely(prefix.well_formed_error_pos() ||
+     cs->cset->scan(cs, (char*) copy->from_ptr + copy_length,
+     (char*) from_end,
+     MY_SEQ_SPACES) <
+     (copy->from_length - copy_length)))
  {
  copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
  WARN_DATA_TRUNCATED, 1);
@ -376,7 +376,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
|
|||
}
|
||||
}
|
||||
tracker->report_merge_passes_at_end(thd->query_plan_fsort_passes);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
int kill_errno= thd->killed_errno();
|
||||
DBUG_ASSERT(thd->is_error() || kill_errno || thd->killed == ABORT_QUERY);
|
||||
|
@ -414,7 +414,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
|
|||
(longlong) sort->found_rows));
|
||||
MYSQL_FILESORT_DONE(error, num_rows);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
delete sort;
|
||||
sort= 0;
|
||||
|
@ -742,7 +742,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
|
|||
next_pos=(uchar*) 0; /* Find records in sequence */
|
||||
DBUG_EXECUTE_IF("bug14365043_1",
|
||||
DBUG_SET("+d,ha_rnd_init_fail"););
|
||||
if (file->ha_rnd_init_with_error(1))
|
||||
if (unlikely(file->ha_rnd_init_with_error(1)))
|
||||
DBUG_RETURN(HA_POS_ERROR);
|
||||
file->extra_opt(HA_EXTRA_CACHE, thd->variables.read_buff_size);
|
||||
}
|
||||
|
@ -779,7 +779,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
|
|||
{
|
||||
if (quick_select)
|
||||
{
|
||||
if ((error= select->quick->get_next()))
|
||||
if (unlikely((error= select->quick->get_next())))
|
||||
break;
|
||||
file->position(sort_form->record[0]);
|
||||
DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE););
|
||||
|
@ -793,14 +793,14 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
|
|||
my_store_ptr(ref_pos,ref_length,record); // Position to row
|
||||
record+= sort_form->s->db_record_offset;
|
||||
}
|
||||
else if (!error)
|
||||
else if (likely(!error))
|
||||
file->position(sort_form->record[0]);
|
||||
}
|
||||
if (error && error != HA_ERR_RECORD_DELETED)
|
||||
if (unlikely(error && error != HA_ERR_RECORD_DELETED))
|
||||
break;
|
||||
}
|
||||
|
||||
if (thd->check_killed())
|
||||
if (unlikely(thd->check_killed()))
|
||||
{
|
||||
DBUG_PRINT("info",("Sort killed by user"));
|
||||
if (!quick_select)
|
||||
|
@ -812,7 +812,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
|
|||
}
|
||||
|
||||
bool write_record= false;
|
||||
if (error == 0)
|
||||
if (likely(error == 0))
|
||||
{
|
||||
param->examined_rows++;
|
||||
if (select && select->cond)
|
||||
|
@ -865,7 +865,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
|
|||
}
|
||||
|
||||
/* It does not make sense to read more keys in case of a fatal error */
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
break;
|
||||
|
||||
/*
|
||||
|
@ -885,11 +885,11 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
|
|||
/* Signal we should use orignal column read and write maps */
|
||||
sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
|
||||
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
DBUG_RETURN(HA_POS_ERROR);
|
||||
|
||||
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
|
||||
if (error != HA_ERR_END_OF_FILE)
|
||||
if (unlikely(error != HA_ERR_END_OF_FILE))
|
||||
{
|
||||
file->print_error(error,MYF(ME_ERROR | ME_WAITTANG)); // purecov: inspected
|
||||
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
|
||||
|
@ -1507,27 +1507,28 @@ cleanup:
|
|||
/**
|
||||
Read data to buffer.
|
||||
|
||||
@retval
|
||||
(uint)-1 if something goes wrong
|
||||
@retval Number of bytes read
|
||||
(uint)-1 if something goes wrong
|
||||
*/
|
||||
|
||||
uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
|
||||
uint rec_length)
|
||||
{
|
||||
uint count;
|
||||
uint length;
|
||||
uint length= 0;
|
||||
|
||||
if ((count=(uint) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
|
||||
{
|
||||
if (my_b_pread(fromfile, (uchar*) buffpek->base,
|
||||
(length= rec_length*count), buffpek->file_pos))
|
||||
length= rec_length*count;
|
||||
if (unlikely(my_b_pread(fromfile, (uchar*) buffpek->base, length,
|
||||
buffpek->file_pos)))
|
||||
return ((uint) -1);
|
||||
buffpek->key=buffpek->base;
|
||||
buffpek->file_pos+= length; /* New filepos */
|
||||
buffpek->count-= count;
|
||||
buffpek->mem_count= count;
|
||||
}
|
||||
return (count*rec_length);
|
||||
return (length);
|
||||
} /* read_to_buffer */
|
||||
|
||||
|
||||
|
@ -1648,7 +1649,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
|
|||
strpos+=
|
||||
(uint) (error= (int) read_to_buffer(from_file, buffpek, rec_length));
|
||||
|
||||
if (error == -1)
|
||||
if (unlikely(error == -1))
|
||||
goto err; /* purecov: inspected */
|
||||
buffpek->max_keys= buffpek->mem_count; // If less data in buffers than expected
|
||||
queue_insert(&queue, (uchar*) buffpek);
|
||||
|
@ -1669,13 +1670,13 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
|
|||
buffpek->key+= rec_length;
|
||||
if (! --buffpek->mem_count)
|
||||
{
|
||||
if (!(error= (int) read_to_buffer(from_file, buffpek,
|
||||
rec_length)))
|
||||
if (unlikely(!(error= (int) read_to_buffer(from_file, buffpek,
|
||||
rec_length))))
|
||||
{
|
||||
(void) queue_remove_top(&queue);
|
||||
reuse_freed_buff(&queue, buffpek, rec_length);
|
||||
}
|
||||
else if (error == -1)
|
||||
else if (unlikely(error == -1))
|
||||
goto err; /* purecov: inspected */
|
||||
}
|
||||
queue_replace_top(&queue); // Top element has been used
|
||||
|
@ -1685,7 +1686,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
|
|||
|
||||
while (queue.elements > 1)
|
||||
{
|
||||
if (killable && thd->check_killed())
|
||||
if (killable && unlikely(thd->check_killed()))
|
||||
{
|
||||
error= 1; goto err; /* purecov: inspected */
|
||||
}
|
||||
|
@ -1746,8 +1747,8 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
|
|||
buffpek->key+= rec_length;
|
||||
if (! --buffpek->mem_count)
|
||||
{
|
||||
if (!(error= (int) read_to_buffer(from_file, buffpek,
|
||||
rec_length)))
|
||||
if (unlikely(!(error= (int) read_to_buffer(from_file, buffpek,
|
||||
rec_length))))
|
||||
{
|
||||
(void) queue_remove_top(&queue);
|
||||
reuse_freed_buff(&queue, buffpek, rec_length);
|
||||
|
@ -1837,8 +1838,8 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
|
|||
}
|
||||
}
|
||||
}
|
||||
while ((error=(int) read_to_buffer(from_file, buffpek, rec_length))
|
||||
!= -1 && error != 0);
|
||||
while (likely((error=(int) read_to_buffer(from_file, buffpek, rec_length))
|
||||
!= -1 && error != 0));
|
||||
|
||||
end:
|
||||
lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows);
|
||||
|
|
|
@ -140,7 +140,8 @@ void insert_into_hash(hash_lex_struct *root, const char *name,
|
|||
if (root->first_char>(*name))
|
||||
{
|
||||
size_t new_size= root->last_char-(*name)+1;
|
||||
if (new_size<real_size) printf("error!!!!\n");
|
||||
if (unlikely(new_size<real_size))
|
||||
printf("error!!!!\n");
|
||||
tails= root->char_tails;
|
||||
tails= (hash_lex_struct*)realloc((char*)tails,
|
||||
sizeof(hash_lex_struct)*new_size);
|
||||
|
@ -155,7 +156,8 @@ void insert_into_hash(hash_lex_struct *root, const char *name,
|
|||
if (root->last_char<(*name))
|
||||
{
|
||||
size_t new_size= (*name)-root->first_char+1;
|
||||
if (new_size<real_size) printf("error!!!!\n");
|
||||
if (unlikely(new_size<real_size))
|
||||
printf("error!!!!\n");
|
||||
tails= root->char_tails;
|
||||
tails= (hash_lex_struct*)realloc((char*)tails,
|
||||
sizeof(hash_lex_struct)*new_size);
|
||||
|
|
|
@ -63,7 +63,7 @@ int Pushdown_query::execute(JOIN *join)
|
|||
|
||||
while (!(err= handler->next_row()))
|
||||
{
|
||||
if (thd->check_killed())
|
||||
if (unlikely(thd->check_killed()))
|
||||
{
|
||||
thd->send_kill_message();
|
||||
handler->end_scan();
|
||||
|
@ -78,7 +78,7 @@ int Pushdown_query::execute(JOIN *join)
|
|||
if ((err= table->file->ha_write_tmp_row(table->record[0])))
|
||||
{
|
||||
bool is_duplicate;
|
||||
if (!table->file->is_fatal_error(err, HA_CHECK_DUP))
|
||||
if (likely(!table->file->is_fatal_error(err, HA_CHECK_DUP)))
|
||||
continue; // Distinct elimination
|
||||
|
||||
if (create_internal_tmp_table_from_heap(thd, table,
|
||||
|
@ -98,7 +98,7 @@ int Pushdown_query::execute(JOIN *join)
|
|||
{
|
||||
int error;
|
||||
/* result < 0 if row was not accepted and should not be counted */
|
||||
if ((error= join->result->send_data(*join->fields)))
|
||||
if (unlikely((error= join->result->send_data(*join->fields))))
|
||||
{
|
||||
handler->end_scan();
|
||||
DBUG_RETURN(error < 0 ? 0 : -1);
|
||||
|
|
|
@ -738,12 +738,16 @@ int ha_partition::create(const char *name, TABLE *table_arg,
|
|||
for (j= 0; j < m_part_info->num_subparts; j++)
|
||||
{
|
||||
part_elem= sub_it++;
|
||||
if ((error= create_partition_name(name_buff, sizeof(name_buff), path,
|
||||
name_buffer_ptr, NORMAL_PART_NAME, FALSE)))
|
||||
if (unlikely((error= create_partition_name(name_buff,
|
||||
sizeof(name_buff), path,
|
||||
name_buffer_ptr,
|
||||
NORMAL_PART_NAME, FALSE))))
|
||||
goto create_error;
|
||||
if ((error= set_up_table_before_create(table_arg, name_buff,
|
||||
create_info, part_elem)) ||
|
||||
((error= (*file)->ha_create(name_buff, table_arg, create_info))))
|
||||
if (unlikely((error= set_up_table_before_create(table_arg, name_buff,
|
||||
create_info,
|
||||
part_elem)) ||
|
||||
((error= (*file)->ha_create(name_buff, table_arg,
|
||||
create_info)))))
|
||||
goto create_error;
|
||||
|
||||
name_buffer_ptr= strend(name_buffer_ptr) + 1;
|
||||
|
@ -752,12 +756,15 @@ int ha_partition::create(const char *name, TABLE *table_arg,
|
|||
}
|
||||
else
|
||||
{
|
||||
if ((error= create_partition_name(name_buff, sizeof(name_buff), path,
|
||||
name_buffer_ptr, NORMAL_PART_NAME, FALSE)))
|
||||
if (unlikely((error= create_partition_name(name_buff, sizeof(name_buff),
|
||||
path, name_buffer_ptr,
|
||||
NORMAL_PART_NAME, FALSE))))
|
||||
goto create_error;
|
||||
if ((error= set_up_table_before_create(table_arg, name_buff,
|
||||
create_info, part_elem)) ||
|
||||
((error= (*file)->ha_create(name_buff, table_arg, create_info))))
|
||||
if (unlikely((error= set_up_table_before_create(table_arg, name_buff,
|
||||
create_info,
|
||||
part_elem)) ||
|
||||
((error= (*file)->ha_create(name_buff, table_arg,
|
||||
create_info)))))
|
||||
goto create_error;
|
||||
|
||||
name_buffer_ptr= strend(name_buffer_ptr) + 1;
|
||||
|
@ -832,16 +839,19 @@ int ha_partition::drop_partitions(const char *path)
|
|||
{
|
||||
partition_element *sub_elem= sub_it++;
|
||||
part= i * num_subparts + j;
|
||||
if ((ret_error= create_subpartition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name, name_variant)))
|
||||
if (unlikely((ret_error=
|
||||
create_subpartition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name,
|
||||
name_variant))))
|
||||
error= ret_error;
|
||||
file= m_file[part];
|
||||
DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
|
||||
if ((ret_error= file->ha_delete_table(part_name_buff)))
|
||||
if (unlikely((ret_error= file->ha_delete_table(part_name_buff))))
|
||||
error= ret_error;
|
||||
if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
|
||||
if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry->
|
||||
entry_pos)))
|
||||
error= 1;
|
||||
} while (++j < num_subparts);
|
||||
}
|
||||
|
@ -855,9 +865,10 @@ int ha_partition::drop_partitions(const char *path)
|
|||
{
|
||||
file= m_file[i];
|
||||
DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
|
||||
if ((ret_error= file->ha_delete_table(part_name_buff)))
|
||||
if (unlikely((ret_error= file->ha_delete_table(part_name_buff))))
|
||||
error= ret_error;
|
||||
if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
|
||||
if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry->
|
||||
entry_pos)))
|
||||
error= 1;
|
||||
}
|
||||
}
|
||||
|
@ -938,15 +949,18 @@ int ha_partition::rename_partitions(const char *path)
|
|||
{
|
||||
sub_elem= sub_it++;
|
||||
file= m_reorged_file[part_count++];
|
||||
if ((ret_error= create_subpartition_name(norm_name_buff,
|
||||
sizeof(norm_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name, NORMAL_PART_NAME)))
|
||||
if (unlikely((ret_error=
|
||||
create_subpartition_name(norm_name_buff,
|
||||
sizeof(norm_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name,
|
||||
NORMAL_PART_NAME))))
|
||||
error= ret_error;
|
||||
DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
|
||||
if ((ret_error= file->ha_delete_table(norm_name_buff)))
|
||||
if (unlikely((ret_error= file->ha_delete_table(norm_name_buff))))
|
||||
error= ret_error;
|
||||
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
|
||||
else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry->
|
||||
entry_pos)))
|
||||
error= 1;
|
||||
else
|
||||
sub_elem->log_entry= NULL; /* Indicate success */
|
||||
|
@ -955,16 +969,19 @@ int ha_partition::rename_partitions(const char *path)
|
|||
else
|
||||
{
|
||||
file= m_reorged_file[part_count++];
|
||||
if ((ret_error= create_partition_name(norm_name_buff,
|
||||
sizeof(norm_name_buff), path,
|
||||
part_elem->partition_name, NORMAL_PART_NAME, TRUE)))
|
||||
if (unlikely((ret_error=
|
||||
create_partition_name(norm_name_buff,
|
||||
sizeof(norm_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
NORMAL_PART_NAME, TRUE))))
|
||||
error= ret_error;
|
||||
else
|
||||
{
|
||||
DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
|
||||
if ((ret_error= file->ha_delete_table(norm_name_buff)))
|
||||
if (unlikely((ret_error= file->ha_delete_table(norm_name_buff))))
|
||||
error= ret_error;
|
||||
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
|
||||
else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry->
|
||||
entry_pos)))
|
||||
error= 1;
|
||||
else
|
||||
part_elem->log_entry= NULL; /* Indicate success */
|
||||
|
@ -1011,33 +1028,39 @@ int ha_partition::rename_partitions(const char *path)
|
|||
{
|
||||
sub_elem= sub_it++;
|
||||
part= i * num_subparts + j;
|
||||
if ((ret_error= create_subpartition_name(norm_name_buff,
|
||||
sizeof(norm_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name, NORMAL_PART_NAME)))
|
||||
if (unlikely((ret_error=
|
||||
create_subpartition_name(norm_name_buff,
|
||||
sizeof(norm_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name,
|
||||
NORMAL_PART_NAME))))
|
||||
error= ret_error;
|
||||
if (part_elem->part_state == PART_IS_CHANGED)
|
||||
{
|
||||
file= m_reorged_file[part_count++];
|
||||
DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
|
||||
if ((ret_error= file->ha_delete_table(norm_name_buff)))
|
||||
if (unlikely((ret_error= file->ha_delete_table(norm_name_buff))))
|
||||
error= ret_error;
|
||||
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
|
||||
else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry->
|
||||
entry_pos)))
|
||||
error= 1;
|
||||
(void) sync_ddl_log();
|
||||
}
|
||||
file= m_new_file[part];
|
||||
if ((ret_error= create_subpartition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name, TEMP_PART_NAME)))
|
||||
if (unlikely((ret_error=
|
||||
create_subpartition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name,
|
||||
TEMP_PART_NAME))))
|
||||
error= ret_error;
|
||||
DBUG_PRINT("info", ("Rename subpartition from %s to %s",
|
||||
part_name_buff, norm_name_buff));
|
||||
if ((ret_error= file->ha_rename_table(part_name_buff,
|
||||
norm_name_buff)))
|
||||
if (unlikely((ret_error= file->ha_rename_table(part_name_buff,
|
||||
norm_name_buff))))
|
||||
error= ret_error;
|
||||
else if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
|
||||
else if (unlikely(deactivate_ddl_log_entry(sub_elem->log_entry->
|
||||
entry_pos)))
|
||||
error= 1;
|
||||
else
|
||||
sub_elem->log_entry= NULL;
|
||||
|
@ -1045,12 +1068,17 @@ int ha_partition::rename_partitions(const char *path)
|
|||
}
|
||||
else
|
||||
{
|
||||
if ((ret_error= create_partition_name(norm_name_buff,
|
||||
sizeof(norm_name_buff), path,
|
||||
part_elem->partition_name, NORMAL_PART_NAME, TRUE)) ||
|
||||
(ret_error= create_partition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path,
|
||||
part_elem->partition_name, TEMP_PART_NAME, TRUE)))
|
||||
if (unlikely((ret_error=
|
||||
create_partition_name(norm_name_buff,
|
||||
sizeof(norm_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
NORMAL_PART_NAME, TRUE)) ||
|
||||
(ret_error= create_partition_name(part_name_buff,
|
||||
sizeof(part_name_buff),
|
||||
path,
|
||||
part_elem->
|
||||
partition_name,
|
||||
TEMP_PART_NAME, TRUE))))
|
||||
error= ret_error;
|
||||
else
|
||||
{
|
||||
|
@ -1058,19 +1086,21 @@ int ha_partition::rename_partitions(const char *path)
|
|||
{
|
||||
file= m_reorged_file[part_count++];
|
||||
DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
|
||||
if ((ret_error= file->ha_delete_table(norm_name_buff)))
|
||||
if (unlikely((ret_error= file->ha_delete_table(norm_name_buff))))
|
||||
error= ret_error;
|
||||
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
|
||||
else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry->
|
||||
entry_pos)))
|
||||
error= 1;
|
||||
(void) sync_ddl_log();
|
||||
}
|
||||
file= m_new_file[i];
|
||||
DBUG_PRINT("info", ("Rename partition from %s to %s",
|
||||
part_name_buff, norm_name_buff));
|
||||
if ((ret_error= file->ha_rename_table(part_name_buff,
|
||||
norm_name_buff)))
|
||||
if (unlikely((ret_error= file->ha_rename_table(part_name_buff,
|
||||
norm_name_buff))))
|
||||
error= ret_error;
|
||||
else if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
|
||||
else if (unlikely(deactivate_ddl_log_entry(part_elem->log_entry->
|
||||
entry_pos)))
|
||||
error= 1;
|
||||
else
|
||||
part_elem->log_entry= NULL;
|
||||
|
@ -1394,7 +1424,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
|
|||
part= i * num_subparts + j;
|
||||
DBUG_PRINT("info", ("Optimize subpartition %u (%s)",
|
||||
part, sub_elem->partition_name));
|
||||
if ((error= handle_opt_part(thd, check_opt, part, flag)))
|
||||
if (unlikely((error= handle_opt_part(thd, check_opt, part, flag))))
|
||||
{
|
||||
/* print a line which partition the error belongs to */
|
||||
if (error != HA_ADMIN_NOT_IMPLEMENTED &&
|
||||
|
@ -1421,7 +1451,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
|
|||
{
|
||||
DBUG_PRINT("info", ("Optimize partition %u (%s)", i,
|
||||
part_elem->partition_name));
|
||||
if ((error= handle_opt_part(thd, check_opt, i, flag)))
|
||||
if (unlikely((error= handle_opt_part(thd, check_opt, i, flag))))
|
||||
{
|
||||
/* print a line which partition the error belongs to */
|
||||
if (error != HA_ADMIN_NOT_IMPLEMENTED &&
|
||||
|
@ -1554,7 +1584,8 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
|
|||
truncate_partition_filename((char*) p_elem->data_file_name);
|
||||
truncate_partition_filename((char*) p_elem->index_file_name);
|
||||
|
||||
if ((error= set_up_table_before_create(tbl, part_name, create_info, p_elem)))
|
||||
if (unlikely((error= set_up_table_before_create(tbl, part_name, create_info,
|
||||
p_elem))))
|
||||
goto error_create;
|
||||
|
||||
if (!(file->ht->flags & HTON_CAN_READ_CONNECT_STRING_IN_PARTITION))
|
||||
|
@ -1573,8 +1604,8 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
|
|||
goto error_create;
|
||||
}
|
||||
DBUG_PRINT("info", ("partition %s created", part_name));
|
||||
if ((error= file->ha_open(tbl, part_name, m_mode,
|
||||
m_open_test_lock | HA_OPEN_NO_PSI_CALL)))
|
||||
if (unlikely((error= file->ha_open(tbl, part_name, m_mode,
|
||||
m_open_test_lock | HA_OPEN_NO_PSI_CALL))))
|
||||
goto error_open;
|
||||
DBUG_PRINT("info", ("partition %s opened", part_name));
|
||||
|
||||
|
@ -1584,7 +1615,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
|
|||
assumes that external_lock() is last call that may fail here.
|
||||
Otherwise see description for cleanup_new_partition().
|
||||
*/
|
||||
if ((error= file->ha_external_lock(ha_thd(), F_WRLCK)))
|
||||
if (unlikely((error= file->ha_external_lock(ha_thd(), F_WRLCK))))
|
||||
goto error_external_lock;
|
||||
DBUG_PRINT("info", ("partition %s external locked", part_name));
|
||||
|
||||
|
@ -1920,21 +1951,24 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
|
|||
do
|
||||
{
|
||||
partition_element *sub_elem= sub_it++;
|
||||
if ((error= create_subpartition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path,
|
||||
part_elem->partition_name, sub_elem->partition_name,
|
||||
name_variant)))
|
||||
if (unlikely((error=
|
||||
create_subpartition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
sub_elem->partition_name,
|
||||
name_variant))))
|
||||
{
|
||||
cleanup_new_partition(part_count);
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
part= i * num_subparts + j;
|
||||
DBUG_PRINT("info", ("Add subpartition %s", part_name_buff));
|
||||
if ((error= prepare_new_partition(table, create_info,
|
||||
new_file_array[part],
|
||||
(const char *)part_name_buff,
|
||||
sub_elem,
|
||||
disable_non_uniq_indexes)))
|
||||
if (unlikely((error=
|
||||
prepare_new_partition(table, create_info,
|
||||
new_file_array[part],
|
||||
(const char *)part_name_buff,
|
||||
sub_elem,
|
||||
disable_non_uniq_indexes))))
|
||||
{
|
||||
cleanup_new_partition(part_count);
|
||||
DBUG_RETURN(error);
|
||||
|
@ -1945,20 +1979,23 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
|
|||
}
|
||||
else
|
||||
{
|
||||
if ((error= create_partition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path, part_elem->partition_name,
|
||||
name_variant, TRUE)))
|
||||
if (unlikely((error=
|
||||
create_partition_name(part_name_buff,
|
||||
sizeof(part_name_buff), path,
|
||||
part_elem->partition_name,
|
||||
name_variant, TRUE))))
|
||||
{
|
||||
cleanup_new_partition(part_count);
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
DBUG_PRINT("info", ("Add partition %s", part_name_buff));
|
||||
if ((error= prepare_new_partition(table, create_info,
|
||||
new_file_array[i],
|
||||
(const char *)part_name_buff,
|
||||
part_elem,
|
||||
disable_non_uniq_indexes)))
|
||||
if (unlikely((error=
|
||||
prepare_new_partition(table, create_info,
|
||||
new_file_array[i],
|
||||
(const char *)part_name_buff,
|
||||
part_elem,
|
||||
disable_non_uniq_indexes))))
|
||||
{
|
||||
cleanup_new_partition(part_count);
|
||||
DBUG_RETURN(error);
|
||||
|
@ -1992,7 +2029,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
|
|||
part_elem->part_state= PART_TO_BE_DROPPED;
|
||||
}
|
||||
m_new_file= new_file_array;
|
||||
if ((error= copy_partitions(copied, deleted)))
|
||||
if (unlikely((error= copy_partitions(copied, deleted))))
|
||||
{
|
||||
/*
|
||||
Close and unlock the new temporary partitions.
|
||||
|
@ -2049,7 +2086,7 @@ int ha_partition::copy_partitions(ulonglong * const copied,
|
|||
uint32 new_part;
|
||||
|
||||
late_extra_cache(reorg_part);
|
||||
if ((result= file->ha_rnd_init_with_error(1)))
|
||||
if (unlikely((result= file->ha_rnd_init_with_error(1))))
|
||||
goto init_error;
|
||||
while (TRUE)
|
||||
{
|
||||
|
@ -2331,7 +2368,7 @@ uint ha_partition::del_ren_table(const char *from, const char *to)
|
|||
Delete table, start by delete the .par file. If error, break, otherwise
|
||||
delete as much as possible.
|
||||
*/
|
||||
if ((error= handler::delete_table(from)))
|
||||
if (unlikely((error= handler::delete_table(from))))
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
/*
|
||||
|
@ -2347,17 +2384,19 @@ uint ha_partition::del_ren_table(const char *from, const char *to)
|
|||
i= 0;
|
||||
do
|
||||
{
|
||||
if ((error= create_partition_name(from_buff, sizeof(from_buff), from_path,
|
||||
name_buffer_ptr, NORMAL_PART_NAME, FALSE)))
|
||||
if (unlikely((error= create_partition_name(from_buff, sizeof(from_buff),
|
||||
from_path, name_buffer_ptr,
|
||||
NORMAL_PART_NAME, FALSE))))
|
||||
goto rename_error;
|
||||
|
||||
if (to != NULL)
|
||||
{ // Rename branch
|
||||
if ((error= create_partition_name(to_buff, sizeof(to_buff), to_path,
|
||||
name_buffer_ptr, NORMAL_PART_NAME, FALSE)))
|
||||
if (unlikely((error= create_partition_name(to_buff, sizeof(to_buff),
|
||||
to_path, name_buffer_ptr,
|
||||
NORMAL_PART_NAME, FALSE))))
|
||||
goto rename_error;
|
||||
error= (*file)->ha_rename_table(from_buff, to_buff);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto rename_error;
|
||||
}
|
||||
else // delete branch
|
||||
|
@ -2365,13 +2404,13 @@ uint ha_partition::del_ren_table(const char *from, const char *to)
|
|||
error= (*file)->ha_delete_table(from_buff);
|
||||
}
|
||||
name_buffer_ptr= strend(name_buffer_ptr) + 1;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
save_error= error;
|
||||
i++;
|
||||
} while (*(++file));
|
||||
if (to != NULL)
|
||||
{
|
||||
if ((error= handler::rename_table(from, to)))
|
||||
if (unlikely((error= handler::rename_table(from, to))))
|
||||
{
|
||||
/* Try to revert everything, ignore errors */
|
||||
(void) handler::rename_table(to, from);
|
||||
|
@ -3486,7 +3525,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
|
|||
if (init_partition_bitmaps())
|
||||
goto err_alloc;
|
||||
|
||||
if ((error= m_part_info->set_partition_bitmaps(m_partitions_to_open)))
|
||||
if (unlikely((error=
|
||||
m_part_info->set_partition_bitmaps(m_partitions_to_open))))
|
||||
goto err_alloc;
|
||||
|
||||
/* Allocate memory used with MMR */
|
||||
|
@ -3535,8 +3575,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
|
|||
if (!bitmap_is_set(&m_is_clone_of->m_opened_partitions, i))
|
||||
continue;
|
||||
|
||||
if ((error= create_partition_name(name_buff, sizeof(name_buff), name,
|
||||
name_buffer_ptr, NORMAL_PART_NAME, FALSE)))
|
||||
if (unlikely((error= create_partition_name(name_buff, sizeof(name_buff),
|
||||
name, name_buffer_ptr,
|
||||
NORMAL_PART_NAME, FALSE))))
|
||||
goto err_handler;
|
||||
/* ::clone() will also set ha_share from the original. */
|
||||
if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root)))
|
||||
|
@ -3553,7 +3594,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
|
|||
}
|
||||
else
|
||||
{
|
||||
if ((error= open_read_partitions(name_buff, sizeof(name_buff))))
|
||||
if (unlikely((error= open_read_partitions(name_buff, sizeof(name_buff)))))
|
||||
goto err_handler;
|
||||
m_num_locks= m_file_sample->lock_count();
|
||||
}
|
||||
|
@ -3886,7 +3927,7 @@ int ha_partition::external_lock(THD *thd, int lock_type)
|
|||
i= bitmap_get_next_set(used_partitions, i))
|
||||
{
|
||||
DBUG_PRINT("info", ("external_lock(thd, %d) part %u", lock_type, i));
|
||||
if ((error= m_file[i]->ha_external_lock(thd, lock_type)))
|
||||
if (unlikely((error= m_file[i]->ha_external_lock(thd, lock_type))))
|
||||
{
|
||||
if (lock_type != F_UNLCK)
|
||||
goto err_handler;
|
||||
|
@ -4050,7 +4091,7 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
|
|||
i < m_tot_parts;
|
||||
i= bitmap_get_next_set(&m_part_info->lock_partitions, i))
|
||||
{
|
||||
if ((error= m_file[i]->start_stmt(thd, lock_type)))
|
||||
if (unlikely((error= m_file[i]->start_stmt(thd, lock_type))))
|
||||
break;
|
||||
/* Add partition to be called in reset(). */
|
||||
bitmap_set_bit(&m_partitions_to_reset, i);
|
||||
|
@ -4235,7 +4276,7 @@ int ha_partition::write_row(uchar * buf)
|
|||
it is highly likely that we will not be able to insert it into
|
||||
the correct partition. We must check and fail if neccessary.
|
||||
*/
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto exit;
|
||||
|
||||
/*
|
||||
|
@ -4346,9 +4387,10 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data)
|
|||
DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id));
|
||||
#endif
|
||||
|
||||
if ((error= get_part_for_buf(new_data, m_rec0, m_part_info, &new_part_id)))
|
||||
if (unlikely((error= get_part_for_buf(new_data, m_rec0, m_part_info,
|
||||
&new_part_id))))
|
||||
goto exit;
|
||||
if (!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id))
|
||||
if (unlikely(!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id)))
|
||||
{
|
||||
error= HA_ERR_NOT_IN_LOCK_PARTITIONS;
|
||||
goto exit;
|
||||
|
@ -4385,13 +4427,13 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data)
|
|||
error= m_file[new_part_id]->ha_write_row((uchar*) new_data);
|
||||
reenable_binlog(thd);
|
||||
table->next_number_field= saved_next_number_field;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto exit;
|
||||
|
||||
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
|
||||
error= m_file[old_part_id]->ha_delete_row(old_data);
|
||||
reenable_binlog(thd);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto exit;
|
||||
}
|
||||
|
||||
|
@ -4535,7 +4577,7 @@ int ha_partition::delete_all_rows()
|
|||
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
|
||||
{
|
||||
/* Can be pruned, like DELETE FROM t PARTITION (pX) */
|
||||
if ((error= m_file[i]->ha_delete_all_rows()))
|
||||
if (unlikely((error= m_file[i]->ha_delete_all_rows())))
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
DBUG_RETURN(0);
|
||||
|
@ -4567,7 +4609,7 @@ int ha_partition::truncate()
|
|||
file= m_file;
|
||||
do
|
||||
{
|
||||
if ((error= (*file)->ha_truncate()))
|
||||
if (unlikely((error= (*file)->ha_truncate())))
|
||||
DBUG_RETURN(error);
|
||||
} while (*(++file));
|
||||
DBUG_RETURN(0);
|
||||
|
@ -4625,7 +4667,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
|
|||
part= i * num_subparts + j;
|
||||
DBUG_PRINT("info", ("truncate subpartition %u (%s)",
|
||||
part, sub_elem->partition_name));
|
||||
if ((error= m_file[part]->ha_truncate()))
|
||||
if (unlikely((error= m_file[part]->ha_truncate())))
|
||||
break;
|
||||
sub_elem->part_state= PART_NORMAL;
|
||||
} while (++j < num_subparts);
|
||||
|
@ -4901,7 +4943,7 @@ int ha_partition::rnd_init(bool scan)
|
|||
i < m_tot_parts;
|
||||
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
|
||||
{
|
||||
if ((error= m_file[i]->ha_rnd_init(scan)))
|
||||
if (unlikely((error= m_file[i]->ha_rnd_init(scan))))
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@ -5365,7 +5407,7 @@ int ha_partition::index_init(uint inx, bool sorted)
|
|||
i < m_tot_parts;
|
||||
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
|
||||
{
|
||||
if ((error= m_file[i]->ha_index_init(inx, sorted)))
|
||||
if (unlikely((error= m_file[i]->ha_index_init(inx, sorted))))
|
||||
goto err;
|
||||
|
||||
DBUG_EXECUTE_IF("ha_partition_fail_index_init", {
|
||||
|
@ -5375,7 +5417,7 @@ int ha_partition::index_init(uint inx, bool sorted)
|
|||
});
|
||||
}
|
||||
err:
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
/* End the previously initialized indexes. */
|
||||
uint j;
|
||||
|
@ -5570,7 +5612,7 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key)
|
|||
m_start_key.keypart_map, m_start_key.flag, key_len));
|
||||
DBUG_ASSERT(key_len);
|
||||
}
|
||||
if ((error= partition_scan_set_up(buf, have_start_key)))
|
||||
if (unlikely((error= partition_scan_set_up(buf, have_start_key))))
|
||||
{
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
@ -5596,7 +5638,7 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key)
|
|||
*/
|
||||
DBUG_PRINT("info", ("doing unordered scan"));
|
||||
error= handle_pre_scan(FALSE, FALSE);
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
error= handle_unordered_scan_next_partition(buf);
|
||||
}
|
||||
else
|
||||
|
@ -5686,12 +5728,12 @@ int ha_partition::common_first_last(uchar *buf)
|
|||
{
|
||||
int error;
|
||||
|
||||
if ((error= partition_scan_set_up(buf, FALSE)))
|
||||
if (unlikely((error= partition_scan_set_up(buf, FALSE))))
|
||||
return error;
|
||||
if (!m_ordered_scan_ongoing &&
|
||||
m_index_scan_type != partition_index_last)
|
||||
{
|
||||
if ((error= handle_pre_scan(FALSE, check_parallel_search())))
|
||||
if (unlikely((error= handle_pre_scan(FALSE, check_parallel_search()))))
|
||||
return error;
|
||||
return handle_unordered_scan_next_partition(buf);
|
||||
}
|
||||
|
@ -5739,8 +5781,8 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index,
|
|||
{
|
||||
error= m_file[part]->ha_index_read_idx_map(buf, index, key,
|
||||
keypart_map, find_flag);
|
||||
if (error != HA_ERR_KEY_NOT_FOUND &&
|
||||
error != HA_ERR_END_OF_FILE)
|
||||
if (likely(error != HA_ERR_KEY_NOT_FOUND &&
|
||||
error != HA_ERR_END_OF_FILE))
|
||||
break;
|
||||
}
|
||||
if (part <= m_part_spec.end_part)
|
||||
|
@ -6236,9 +6278,9 @@ ha_rows ha_partition::multi_range_read_info_const(uint keyno,
|
|||
save_part_spec= m_part_spec;
|
||||
|
||||
seq_it= seq->init(seq_init_param, n_ranges, *mrr_mode);
|
||||
if ((error= multi_range_key_create_key(seq, seq_it)))
|
||||
if (unlikely((error= multi_range_key_create_key(seq, seq_it))))
|
||||
{
|
||||
if (error == HA_ERR_END_OF_FILE) // No keys in range
|
||||
if (likely(error == HA_ERR_END_OF_FILE)) // No keys in range
|
||||
{
|
||||
rows= 0;
|
||||
goto calc_cost;
|
||||
|
@ -6355,7 +6397,7 @@ int ha_partition::multi_range_read_init(RANGE_SEQ_IF *seq,
|
|||
|
||||
m_seq_if= seq;
|
||||
m_seq= seq->init(seq_init_param, n_ranges, mrr_mode);
|
||||
if ((error= multi_range_key_create_key(seq, m_seq)))
|
||||
if (unlikely((error= multi_range_key_create_key(seq, m_seq))))
|
||||
DBUG_RETURN(0);
|
||||
|
||||
m_part_seq_if.get_key_info= (seq->get_key_info ?
|
||||
|
@ -6406,12 +6448,12 @@ int ha_partition::multi_range_read_init(RANGE_SEQ_IF *seq,
|
|||
else
|
||||
m_mrr_buffer[i]= *buf;
|
||||
|
||||
if ((error= (*file)->
|
||||
multi_range_read_init(&m_part_seq_if,
|
||||
&m_partition_part_key_multi_range_hld[i],
|
||||
m_part_mrr_range_length[i],
|
||||
mrr_mode,
|
||||
&m_mrr_buffer[i])))
|
||||
if (unlikely((error= (*file)->
|
||||
multi_range_read_init(&m_part_seq_if,
|
||||
&m_partition_part_key_multi_range_hld[i],
|
||||
m_part_mrr_range_length[i],
|
||||
mrr_mode,
|
||||
&m_mrr_buffer[i]))))
|
||||
goto error;
|
||||
m_stock_range_seq[i]= 0;
|
||||
}
|
||||
|
@ -6440,25 +6482,28 @@ int ha_partition::multi_range_read_next(range_id_t *range_info)
|
|||
{
|
||||
if (m_multi_range_read_first)
|
||||
{
|
||||
if ((error= handle_ordered_index_scan(table->record[0], FALSE)))
|
||||
if (unlikely((error= handle_ordered_index_scan(table->record[0],
|
||||
FALSE))))
|
||||
DBUG_RETURN(error);
|
||||
if (!m_pre_calling)
|
||||
m_multi_range_read_first= FALSE;
|
||||
}
|
||||
else if ((error= handle_ordered_next(table->record[0], eq_range)))
|
||||
else if (unlikely((error= handle_ordered_next(table->record[0],
|
||||
eq_range))))
|
||||
DBUG_RETURN(error);
|
||||
*range_info= m_mrr_range_current->ptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (m_multi_range_read_first)
|
||||
if (unlikely(m_multi_range_read_first))
|
||||
{
|
||||
if ((error= handle_unordered_scan_next_partition(table->record[0])))
|
||||
if (unlikely((error=
|
||||
handle_unordered_scan_next_partition(table->record[0]))))
|
||||
DBUG_RETURN(error);
|
||||
if (!m_pre_calling)
|
||||
m_multi_range_read_first= FALSE;
|
||||
}
|
||||
else if ((error= handle_unordered_next(table->record[0], FALSE)))
|
||||
else if (unlikely((error= handle_unordered_next(table->record[0], FALSE))))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
*range_info=
|
||||
|
@ -6631,7 +6676,7 @@ int ha_partition::ft_init()
|
|||
*/
|
||||
if (m_pre_calling)
|
||||
{
|
||||
if ((error= pre_ft_end()))
|
||||
if (unlikely((error= pre_ft_end())))
|
||||
goto err1;
|
||||
}
|
||||
else
|
||||
|
@ -6642,7 +6687,7 @@ int ha_partition::ft_init()
|
|||
if (bitmap_is_set(&(m_part_info->read_partitions), i))
|
||||
{
|
||||
error= m_pre_calling ? m_file[i]->pre_ft_init() : m_file[i]->ft_init();
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err2;
|
||||
}
|
||||
}
|
||||
|
@ -7195,7 +7240,7 @@ int ha_partition::handle_pre_scan(bool reverse_order, bool use_parallel)
|
|||
}
|
||||
if (error == HA_ERR_END_OF_FILE)
|
||||
error= 0;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
table->status= 0;
|
||||
|
@ -7250,8 +7295,8 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
|
|||
|
||||
if (m_index_scan_type == partition_read_multi_range)
|
||||
{
|
||||
if (!(error= file->
|
||||
multi_range_read_next(&m_range_info[m_part_spec.start_part])))
|
||||
if (likely(!(error= file->
|
||||
multi_range_read_next(&m_range_info[m_part_spec.start_part]))))
|
||||
{
|
||||
m_last_part= m_part_spec.start_part;
|
||||
DBUG_RETURN(0);
|
||||
|
@ -7259,7 +7304,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
|
|||
}
|
||||
else if (m_index_scan_type == partition_read_range)
|
||||
{
|
||||
if (!(error= file->read_range_next()))
|
||||
if (likely(!(error= file->read_range_next())))
|
||||
{
|
||||
m_last_part= m_part_spec.start_part;
|
||||
DBUG_RETURN(0);
|
||||
|
@ -7267,8 +7312,8 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
|
|||
}
|
||||
else if (is_next_same)
|
||||
{
|
||||
if (!(error= file->ha_index_next_same(buf, m_start_key.key,
|
||||
m_start_key.length)))
|
||||
if (likely(!(error= file->ha_index_next_same(buf, m_start_key.key,
|
||||
m_start_key.length))))
|
||||
{
|
||||
m_last_part= m_part_spec.start_part;
|
||||
DBUG_RETURN(0);
|
||||
|
@ -7276,14 +7321,14 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
|
|||
}
|
||||
else
|
||||
{
|
||||
if (!(error= file->ha_index_next(buf)))
|
||||
if (likely(!(error= file->ha_index_next(buf))))
|
||||
{
|
||||
m_last_part= m_part_spec.start_part;
|
||||
DBUG_RETURN(0); // Row was in range
|
||||
}
|
||||
}
|
||||
|
||||
if (error == HA_ERR_END_OF_FILE)
|
||||
if (unlikely(error == HA_ERR_END_OF_FILE))
|
||||
{
|
||||
m_part_spec.start_part++; // Start using next part
|
||||
error= handle_unordered_scan_next_partition(buf);
|
||||
|
@ -7355,12 +7400,13 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
|
|||
DBUG_ASSERT(FALSE);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
m_last_part= i;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
if ((error != HA_ERR_END_OF_FILE) && (error != HA_ERR_KEY_NOT_FOUND))
|
||||
if (likely((error != HA_ERR_END_OF_FILE) &&
|
||||
(error != HA_ERR_KEY_NOT_FOUND)))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
/*
|
||||
|
@ -7420,7 +7466,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
|
|||
error= handle_pre_scan(reverse_order, m_pre_call_use_parallel);
|
||||
else
|
||||
error= handle_pre_scan(reverse_order, check_parallel_search());
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
if (m_key_not_found)
|
||||
|
@ -7484,7 +7530,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
|
|||
*/
|
||||
error= file->read_range_first(m_start_key.key? &m_start_key: NULL,
|
||||
end_range, eq_range, TRUE);
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
memcpy(rec_buf_ptr, table->record[0], m_rec_length);
|
||||
reverse_order= FALSE;
|
||||
break;
|
||||
|
@ -7501,7 +7547,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
|
|||
bitmap_clear_bit(&m_mrr_used_partitions, i);
|
||||
continue;
|
||||
}
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
memcpy(rec_buf_ptr, table->record[0], m_rec_length);
|
||||
reverse_order= FALSE;
|
||||
|
@ -7525,7 +7571,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
|
|||
DBUG_ASSERT(FALSE);
|
||||
DBUG_RETURN(HA_ERR_END_OF_FILE);
|
||||
}
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
found= TRUE;
|
||||
if (!m_using_extended_keys)
|
||||
|
@ -7687,7 +7733,7 @@ int ha_partition::handle_ordered_index_scan_key_not_found()
|
|||
error= m_file[i]->ha_index_next(curr_rec_buf);
|
||||
/* HA_ERR_KEY_NOT_FOUND is not allowed from index_next! */
|
||||
DBUG_ASSERT(error != HA_ERR_KEY_NOT_FOUND);
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
DBUG_PRINT("info", ("partition queue_insert(1)"));
|
||||
queue_insert(&m_queue, part_buf);
|
||||
|
@ -7749,7 +7795,7 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
|
|||
{
|
||||
/* There are partitions not included in the index record queue. */
|
||||
uint old_elements= m_queue.elements;
|
||||
if ((error= handle_ordered_index_scan_key_not_found()))
|
||||
if (unlikely((error= handle_ordered_index_scan_key_not_found())))
|
||||
DBUG_RETURN(error);
|
||||
/*
|
||||
If the queue top changed, i.e. one of the partitions that gave
|
||||
|
@ -7785,9 +7831,9 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
|
|||
bool get_next= FALSE;
|
||||
error= file->multi_range_read_next(&m_range_info[part_id]);
|
||||
DBUG_PRINT("info", ("error: %d", error));
|
||||
if (error == HA_ERR_KEY_NOT_FOUND)
|
||||
if (unlikely(error == HA_ERR_KEY_NOT_FOUND))
|
||||
error= HA_ERR_END_OF_FILE;
|
||||
if (error == HA_ERR_END_OF_FILE)
|
||||
if (unlikely(error == HA_ERR_END_OF_FILE))
|
||||
{
|
||||
bitmap_clear_bit(&m_mrr_used_partitions, part_id);
|
||||
DBUG_PRINT("info", ("partition m_queue.elements: %u", m_queue.elements));
|
||||
|
@ -7805,7 +7851,7 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
|
|||
}
|
||||
get_next= TRUE;
|
||||
}
|
||||
else if (!error)
|
||||
else if (likely(!error))
|
||||
{
|
||||
DBUG_PRINT("info", ("m_range_info[%u])->id: %u", part_id,
|
||||
((PARTITION_KEY_MULTI_RANGE *)
|
||||
|
@ -7900,7 +7946,7 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
|
|||
error= file->ha_index_next_same(rec_buf, m_start_key.key,
|
||||
m_start_key.length);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (error == HA_ERR_END_OF_FILE && m_queue.elements)
|
||||
{
|
||||
|
@ -7957,7 +8003,7 @@ int ha_partition::handle_ordered_prev(uchar *buf)
|
|||
uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS;
|
||||
handler *file= m_file[part_id];
|
||||
|
||||
if ((error= file->ha_index_prev(rec_buf)))
|
||||
if (unlikely((error= file->ha_index_prev(rec_buf))))
|
||||
{
|
||||
if (error == HA_ERR_END_OF_FILE && m_queue.elements)
|
||||
{
|
||||
|
@ -8369,7 +8415,7 @@ int ha_partition::open_read_partitions(char *name_buff, size_t name_buff_size)
|
|||
|
||||
if (is_open && !should_be_open)
|
||||
{
|
||||
if ((error= (*file)->ha_close()))
|
||||
if (unlikely((error= (*file)->ha_close())))
|
||||
goto err_handler;
|
||||
bitmap_clear_bit(&m_opened_partitions, n_file);
|
||||
}
|
||||
|
@ -8378,9 +8424,11 @@ int ha_partition::open_read_partitions(char *name_buff, size_t name_buff_size)
|
|||
if (!is_open && should_be_open)
|
||||
{
|
||||
LEX_CSTRING save_connect_string= table->s->connect_string;
|
||||
if ((error= create_partition_name(name_buff, name_buff_size,
|
||||
table->s->normalized_path.str,
|
||||
name_buffer_ptr, NORMAL_PART_NAME, FALSE)))
|
||||
if (unlikely((error=
|
||||
create_partition_name(name_buff, name_buff_size,
|
||||
table->s->normalized_path.str,
|
||||
name_buffer_ptr, NORMAL_PART_NAME,
|
||||
FALSE))))
|
||||
goto err_handler;
|
||||
if (!((*file)->ht->flags & HTON_CAN_READ_CONNECT_STRING_IN_PARTITION))
|
||||
table->s->connect_string= m_connect_string[(uint)(file-m_file)];
|
||||
|
@ -8411,7 +8459,7 @@ int ha_partition::change_partitions_to_open(List<String> *partition_names)
|
|||
return 0;
|
||||
|
||||
m_partitions_to_open= partition_names;
|
||||
if ((error= m_part_info->set_partition_bitmaps(partition_names)))
|
||||
if (unlikely((error= m_part_info->set_partition_bitmaps(partition_names))))
|
||||
goto err_handler;
|
||||
|
||||
if (m_lock_type != F_UNLCK)
|
||||
|
@ -8426,8 +8474,8 @@ int ha_partition::change_partitions_to_open(List<String> *partition_names)
|
|||
if (bitmap_cmp(&m_opened_partitions, &m_part_info->read_partitions) != 0)
|
||||
return 0;
|
||||
|
||||
if ((error= read_par_file(table->s->normalized_path.str)) ||
|
||||
(error= open_read_partitions(name_buff, sizeof(name_buff))))
|
||||
if (unlikely((error= read_par_file(table->s->normalized_path.str)) ||
|
||||
(error= open_read_partitions(name_buff, sizeof(name_buff)))))
|
||||
goto err_handler;
|
||||
|
||||
clear_handler_file();
|
||||
|
@ -9394,8 +9442,8 @@ ha_rows ha_partition::records()
|
|||
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
|
||||
{
|
||||
ha_rows rows;
|
||||
if ((error= m_file[i]->pre_records()) ||
|
||||
(rows= m_file[i]->records()) == HA_POS_ERROR)
|
||||
if (unlikely((error= m_file[i]->pre_records()) ||
|
||||
(rows= m_file[i]->records()) == HA_POS_ERROR))
|
||||
DBUG_RETURN(HA_POS_ERROR);
|
||||
tot_rows+= rows;
|
||||
}
|
||||
|
@ -10043,7 +10091,7 @@ bool ha_partition::commit_inplace_alter_table(TABLE *altered_table,
|
|||
ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[0];
|
||||
error= m_file[0]->ha_commit_inplace_alter_table(altered_table,
|
||||
ha_alter_info, commit);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto end;
|
||||
if (ha_alter_info->group_commit_ctx)
|
||||
{
|
||||
|
@ -10354,7 +10402,7 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
|
|||
/* Only nb_desired_values = 1 makes sense */
|
||||
(*file)->get_auto_increment(offset, increment, 1,
|
||||
&first_value_part, &nb_reserved_values_part);
|
||||
if (first_value_part == ULONGLONG_MAX) // error in one partition
|
||||
if (unlikely(first_value_part == ULONGLONG_MAX)) // error in one partition
|
||||
{
|
||||
*first_value= first_value_part;
|
||||
/* log that the error was between table/partition handler */
|
||||
|
@ -10515,7 +10563,7 @@ int ha_partition::disable_indexes(uint mode)
|
|||
DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions)));
|
||||
for (file= m_file; *file; file++)
|
||||
{
|
||||
if ((error= (*file)->ha_disable_indexes(mode)))
|
||||
if (unlikely((error= (*file)->ha_disable_indexes(mode))))
|
||||
break;
|
||||
}
|
||||
return error;
|
||||
|
@ -10540,7 +10588,7 @@ int ha_partition::enable_indexes(uint mode)
|
|||
DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions)));
|
||||
for (file= m_file; *file; file++)
|
||||
{
|
||||
if ((error= (*file)->ha_enable_indexes(mode)))
|
||||
if (unlikely((error= (*file)->ha_enable_indexes(mode))))
|
||||
break;
|
||||
}
|
||||
return error;
|
||||
|
@ -10565,7 +10613,7 @@ int ha_partition::indexes_are_disabled(void)
|
|||
DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions)));
|
||||
for (file= m_file; *file; file++)
|
||||
{
|
||||
if ((error= (*file)->indexes_are_disabled()))
|
||||
if (unlikely((error= (*file)->indexes_are_disabled())))
|
||||
break;
|
||||
}
|
||||
return error;
|
||||
|
@ -10965,7 +11013,7 @@ int ha_partition::exec_bulk_update(ha_rows *dup_key_found)
|
|||
|
||||
do
|
||||
{
|
||||
if ((error= (*file)->exec_bulk_update(dup_key_found)))
|
||||
if (unlikely((error= (*file)->exec_bulk_update(dup_key_found))))
|
||||
DBUG_RETURN(error);
|
||||
} while (*(++file));
|
||||
DBUG_RETURN(0);
|
||||
|
@ -11130,9 +11178,9 @@ int ha_partition::direct_update_rows_init()
|
|||
bitmap_is_set(&(m_part_info->lock_partitions), i))
|
||||
{
|
||||
file= m_file[i];
|
||||
if ((error= (m_pre_calling ?
|
||||
file->pre_direct_update_rows_init() :
|
||||
file->direct_update_rows_init())))
|
||||
if (unlikely((error= (m_pre_calling ?
|
||||
file->pre_direct_update_rows_init() :
|
||||
file->direct_update_rows_init()))))
|
||||
{
|
||||
DBUG_PRINT("info", ("partition FALSE by storage engine"));
|
||||
DBUG_RETURN(error);
|
||||
|
@ -11228,14 +11276,14 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result)
|
|||
{
|
||||
if (rnd_seq && (m_pre_calling ? file->pre_inited : file->inited) == NONE)
|
||||
{
|
||||
if ((error= (m_pre_calling ?
|
||||
file->ha_pre_rnd_init(TRUE) :
|
||||
file->ha_rnd_init(TRUE))))
|
||||
if (unlikely((error= (m_pre_calling ?
|
||||
file->ha_pre_rnd_init(TRUE) :
|
||||
file->ha_rnd_init(TRUE)))))
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
if ((error= (m_pre_calling ?
|
||||
(file)->pre_direct_update_rows() :
|
||||
(file)->ha_direct_update_rows(&update_rows))))
|
||||
if (unlikely((error= (m_pre_calling ?
|
||||
(file)->pre_direct_update_rows() :
|
||||
(file)->ha_direct_update_rows(&update_rows)))))
|
||||
{
|
||||
if (rnd_seq)
|
||||
{
|
||||
|
@ -11250,9 +11298,9 @@ int ha_partition::direct_update_rows(ha_rows *update_rows_result)
|
|||
}
|
||||
if (rnd_seq)
|
||||
{
|
||||
if ((error= (m_pre_calling ?
|
||||
file->ha_pre_index_or_rnd_end() :
|
||||
file->ha_index_or_rnd_end())))
|
||||
if (unlikely((error= (m_pre_calling ?
|
||||
file->ha_pre_index_or_rnd_end() :
|
||||
file->ha_index_or_rnd_end()))))
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
}
|
||||
|
@ -11316,9 +11364,9 @@ int ha_partition::direct_delete_rows_init()
|
|||
bitmap_is_set(&(m_part_info->lock_partitions), i))
|
||||
{
|
||||
handler *file= m_file[i];
|
||||
if ((error= (m_pre_calling ?
|
||||
file->pre_direct_delete_rows_init() :
|
||||
file->direct_delete_rows_init())))
|
||||
if (unlikely((error= (m_pre_calling ?
|
||||
file->pre_direct_delete_rows_init() :
|
||||
file->direct_delete_rows_init()))))
|
||||
{
|
||||
DBUG_PRINT("exit", ("error in direct_delete_rows_init"));
|
||||
DBUG_RETURN(error);
|
||||
|
@ -11415,9 +11463,9 @@ int ha_partition::direct_delete_rows(ha_rows *delete_rows_result)
|
|||
{
|
||||
if (rnd_seq && (m_pre_calling ? file->pre_inited : file->inited) == NONE)
|
||||
{
|
||||
if ((error= (m_pre_calling ?
|
||||
file->ha_pre_rnd_init(TRUE) :
|
||||
file->ha_rnd_init(TRUE))))
|
||||
if (unlikely((error= (m_pre_calling ?
|
||||
file->ha_pre_rnd_init(TRUE) :
|
||||
file->ha_rnd_init(TRUE)))))
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
if ((error= (m_pre_calling ?
|
||||
|
@ -11434,9 +11482,9 @@ int ha_partition::direct_delete_rows(ha_rows *delete_rows_result)
|
|||
}
|
||||
if (rnd_seq)
|
||||
{
|
||||
if ((error= (m_pre_calling ?
|
||||
file->ha_pre_index_or_rnd_end() :
|
||||
file->ha_index_or_rnd_end())))
|
||||
if (unlikely((error= (m_pre_calling ?
|
||||
file->ha_pre_index_or_rnd_end() :
|
||||
file->ha_index_or_rnd_end()))))
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
}
|
||||
|
|
|
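A note on the hints used throughout this commit: likely()/unlikely() are thin macros over the compiler's branch-prediction builtin, so they only influence how the compiler lays out the expected and unexpected paths; they never change what the condition evaluates to. The sketch below is only an approximation of the usual GCC/Clang form (the real definitions live in the server's compiler-abstraction headers and may differ in detail), and might_fail() is a made-up helper for the example.

/* Approximate definitions -- illustration only, not copied from the tree */
#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
#define likely(x)   __builtin_expect(((x) != 0), 1)   /* branch usually taken */
#define unlikely(x) __builtin_expect(((x) != 0), 0)   /* branch usually not taken */
#else
#define likely(x)   (x)                                /* no-op fallback */
#define unlikely(x) (x)
#endif

static int might_fail(int v) { return v < 0; }         /* hypothetical helper */

int main(void)
{
  int error;
  if (unlikely((error= might_fail(-1))))               /* error path hinted as cold */
    fprintf(stderr, "error: %d\n", error);
  return 0;
}
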
@ -81,7 +81,7 @@ int ha_sequence::open(const char *name, int mode, uint flags)
DBUG_ASSERT(table->s == table_share && file);

file->table= table;
if (!(error= file->open(name, mode, flags)))
if (likely(!(error= file->open(name, mode, flags))))
{
/*
Allocate ref in table's mem_root. We can't use table's ref

@ -111,7 +111,7 @@ int ha_sequence::open(const char *name, int mode, uint flags)
/* Don't try to read the initial row if the call is part of create code */
if (!(flags & (HA_OPEN_FOR_CREATE | HA_OPEN_FOR_REPAIR)))
{
if ((error= table->s->sequence->read_initial_values(table)))
if (unlikely((error= table->s->sequence->read_initial_values(table))))
file->ha_close();
}
else

@ -216,7 +216,7 @@ int ha_sequence::write_row(uchar *buf)
if (tmp_seq.check_and_adjust(0))
DBUG_RETURN(HA_ERR_SEQUENCE_INVALID_DATA);
sequence->copy(&tmp_seq);
if (!(error= file->write_row(buf)))
if (likely(!(error= file->write_row(buf))))
sequence->initialized= SEQUENCE::SEQ_READY_TO_USE;
DBUG_RETURN(error);
}

@ -255,7 +255,7 @@ int ha_sequence::write_row(uchar *buf)
sequence->write_lock(table);
}

if (!(error= file->update_first_row(buf)))
if (likely(!(error= file->update_first_row(buf))))
{
Log_func *log_func= Write_rows_log_event::binlog_row_logging_function;
if (!sequence_locked)

@ -698,7 +698,7 @@ int ha_end()
So if flag is equal to HA_PANIC_CLOSE, then deallocate
the errors.
*/
if (ha_finish_errors())
if (unlikely(ha_finish_errors()))
error= 1;

DBUG_RETURN(error);

@ -1197,7 +1197,7 @@ int ha_prepare(THD *thd)
handlerton *ht= ha_info->ht();
if (ht->prepare)
{
if (prepare_or_error(ht, thd, all))
if (unlikely(prepare_or_error(ht, thd, all)))
{
ha_rollback_trans(thd, all);
error=1;

@ -1475,7 +1475,7 @@ int ha_commit_trans(THD *thd, bool all)
Sic: we know that prepare() is not NULL since otherwise
trans->no_2pc would have been set.
*/
if (prepare_or_error(ht, thd, all))
if (unlikely(prepare_or_error(ht, thd, all)))
goto err;

need_prepare_ordered|= (ht->prepare_ordered != NULL);

@ -2525,7 +2525,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
|
|||
dummy_table.s= &dummy_share;
|
||||
|
||||
path= get_canonical_filename(file, path, tmp_path);
|
||||
if ((error= file->ha_delete_table(path)))
|
||||
if (unlikely((error= file->ha_delete_table(path))))
|
||||
{
|
||||
/*
|
||||
it's not an error if the table doesn't exist in the engine.
|
||||
|
@ -2681,7 +2681,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
|
|||
|
||||
set_partitions_to_open(partitions_to_open);
|
||||
|
||||
if ((error=open(name,mode,test_if_locked)))
|
||||
if (unlikely((error=open(name,mode,test_if_locked))))
|
||||
{
|
||||
if ((error == EACCES || error == EROFS) && mode == O_RDWR &&
|
||||
(table->db_stat & HA_TRY_READ_ONLY))
|
||||
|
@ -2690,7 +2690,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
|
|||
error=open(name,O_RDONLY,test_if_locked);
|
||||
}
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
my_errno= error; /* Safeguard */
|
||||
DBUG_PRINT("error",("error: %d errno: %d",error,errno));
|
||||
|
@ -2967,7 +2967,7 @@ bool handler::ha_was_semi_consistent_read()
|
|||
int handler::ha_rnd_init_with_error(bool scan)
|
||||
{
|
||||
int error;
|
||||
if (!(error= ha_rnd_init(scan)))
|
||||
if (likely(!(error= ha_rnd_init(scan))))
|
||||
return 0;
|
||||
table->file->print_error(error, MYF(0));
|
||||
return error;
|
||||
|
@ -2994,23 +2994,23 @@ int handler::read_first_row(uchar * buf, uint primary_key)
|
|||
if (stats.deleted < 10 || primary_key >= MAX_KEY ||
|
||||
!(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
|
||||
{
|
||||
if (!(error= ha_rnd_init(1)))
|
||||
if (likely(!(error= ha_rnd_init(1))))
|
||||
{
|
||||
while ((error= ha_rnd_next(buf)) == HA_ERR_RECORD_DELETED)
|
||||
/* skip deleted row */;
|
||||
const int end_error= ha_rnd_end();
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
error= end_error;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Find the first row through the primary key */
|
||||
if (!(error= ha_index_init(primary_key, 0)))
|
||||
if (likely(!(error= ha_index_init(primary_key, 0))))
|
||||
{
|
||||
error= ha_index_first(buf);
|
||||
const int end_error= ha_index_end();
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
error= end_error;
|
||||
}
|
||||
}
|
||||
|
@ -3430,7 +3430,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
|
|||
*nb_reserved_values= 1;
|
||||
}
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND)
|
||||
/* No entry found, that's fine */;
|
||||
|
@ -3818,7 +3818,7 @@ void handler::print_error(int error, myf errflag)
|
|||
}
|
||||
}
|
||||
DBUG_ASSERT(textno > 0);
|
||||
if (fatal_error)
|
||||
if (unlikely(fatal_error))
|
||||
{
|
||||
/* Ensure this becomes a true error */
|
||||
errflag&= ~(ME_JUST_WARNING | ME_JUST_INFO);
|
||||
|
@ -3945,7 +3945,7 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
|
|||
if (table->s->frm_version < FRM_VER_TRUE_VARCHAR)
|
||||
return HA_ADMIN_NEEDS_ALTER;
|
||||
|
||||
if ((error= check_collation_compatibility()))
|
||||
if (unlikely((error= check_collation_compatibility())))
|
||||
return error;
|
||||
|
||||
return check_for_upgrade(check_opt);
|
||||
|
@ -4023,7 +4023,8 @@ uint handler::get_dup_key(int error)
|
|||
m_lock_type != F_UNLCK);
|
||||
DBUG_ENTER("handler::get_dup_key");
|
||||
table->file->errkey = (uint) -1;
|
||||
if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
|
||||
if (error == HA_ERR_FOUND_DUPP_KEY ||
|
||||
error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
|
||||
error == HA_ERR_FOUND_DUPP_UNIQUE || error == HA_ERR_NULL_IN_SPATIAL ||
|
||||
error == HA_ERR_DROP_INDEX_FK)
|
||||
table->file->info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
|
||||
|
@ -4087,14 +4088,14 @@ int handler::rename_table(const char * from, const char * to)
|
|||
start_ext= bas_ext();
|
||||
for (ext= start_ext; *ext ; ext++)
|
||||
{
|
||||
if (rename_file_ext(from, to, *ext))
|
||||
if (unlikely(rename_file_ext(from, to, *ext)))
|
||||
{
|
||||
if ((error=my_errno) != ENOENT)
|
||||
break;
|
||||
error= 0;
|
||||
}
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
/* Try to revert the rename. Ignore errors. */
|
||||
for (; ext >= start_ext; ext--)
|
||||
|
@ -4138,15 +4139,15 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt)
|
|||
|
||||
if (table->s->mysql_version < MYSQL_VERSION_ID)
|
||||
{
|
||||
if ((error= check_old_types()))
|
||||
if (unlikely((error= check_old_types())))
|
||||
return error;
|
||||
error= ha_check_for_upgrade(check_opt);
|
||||
if (error && (error != HA_ADMIN_NEEDS_CHECK))
|
||||
if (unlikely(error && (error != HA_ADMIN_NEEDS_CHECK)))
|
||||
return error;
|
||||
if (!error && (check_opt->sql_flags & TT_FOR_UPGRADE))
|
||||
if (unlikely(!error && (check_opt->sql_flags & TT_FOR_UPGRADE)))
|
||||
return 0;
|
||||
}
|
||||
if ((error= check(thd, check_opt)))
|
||||
if (unlikely((error= check(thd, check_opt))))
|
||||
return error;
|
||||
/* Skip updating frm version if not main handler. */
|
||||
if (table->file != this)
|
||||
|
@ -4656,7 +4657,7 @@ int ha_enable_transaction(THD *thd, bool on)
|
|||
is an optimization hint that storage engine is free to ignore.
|
||||
So, let's commit an open transaction (if any) now.
|
||||
*/
|
||||
if (!(error= ha_commit_trans(thd, 0)))
|
||||
if (likely(!(error= ha_commit_trans(thd, 0))))
|
||||
error= trans_commit_implicit(thd);
|
||||
}
|
||||
DBUG_RETURN(error);
|
||||
|
@ -4914,7 +4915,7 @@ int ha_create_table(THD *thd, const char *path,
|
|||
|
||||
error= table.file->ha_create(name, &table, create_info);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (!thd->is_error())
|
||||
my_error(ER_CANT_CREATE_TABLE, MYF(0), db, table_name, error);
|
||||
|
@ -5069,7 +5070,7 @@ static my_bool discover_handlerton(THD *thd, plugin_ref plugin,
|
|||
int error= hton->discover_table(hton, thd, share);
|
||||
if (error != HA_ERR_NO_SUCH_TABLE)
|
||||
{
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (!share->error)
|
||||
{
|
||||
|
@ -5706,12 +5707,12 @@ int handler::index_read_idx_map(uchar * buf, uint index, const uchar * key,
|
|||
int error, UNINIT_VAR(error1);
|
||||
|
||||
error= ha_index_init(index, 0);
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
error= index_read_map(buf, key, keypart_map, find_flag);
|
||||
error1= ha_index_end();
|
||||
}
|
||||
return error ? error : error1;
|
||||
return error ? error : error1;
|
||||
}
|
||||
|
||||
|
||||
|
@ -5848,7 +5849,7 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat)
|
|||
We also check thd->is_error() as Innodb may return 0 even if
|
||||
there was an error.
|
||||
*/
|
||||
if (!result && !thd->is_error())
|
||||
if (likely(!result && !thd->is_error()))
|
||||
my_eof(thd);
|
||||
else if (!thd->is_error())
|
||||
my_error(ER_GET_ERRNO, MYF(0), errno, hton_name(db_type)->str);
|
||||
|
@ -6111,7 +6112,7 @@ int handler::ha_external_lock(THD *thd, int lock_type)
|
|||
|
||||
DBUG_EXECUTE_IF("external_lock_failure", error= HA_ERR_GENERIC;);
|
||||
|
||||
if (error == 0 || lock_type == F_UNLCK)
|
||||
if (likely(error == 0 || lock_type == F_UNLCK))
|
||||
{
|
||||
m_lock_type= lock_type;
|
||||
cached_table_flags= table_flags();
|
||||
|
@ -6232,10 +6233,10 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
|
|||
int handler::update_first_row(uchar *new_data)
|
||||
{
|
||||
int error;
|
||||
if (!(error= ha_rnd_init(1)))
|
||||
if (likely(!(error= ha_rnd_init(1))))
|
||||
{
|
||||
int end_error;
|
||||
if (!(error= ha_rnd_next(table->record[1])))
|
||||
if (likely(!(error= ha_rnd_next(table->record[1]))))
|
||||
{
|
||||
/*
|
||||
We have to do the memcmp as otherwise we may get error 169 from InnoDB
|
||||
|
@ -6244,7 +6245,7 @@ int handler::update_first_row(uchar *new_data)
|
|||
error= update_row(table->record[1], new_data);
|
||||
}
|
||||
end_error= ha_rnd_end();
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
error= end_error;
|
||||
/* Logging would be wrong if update_row works but ha_rnd_end fails */
|
||||
DBUG_ASSERT(!end_error || error != 0);
|
||||
|
|
|
@ -460,7 +460,7 @@ int ip_to_hostname(struct sockaddr_storage *ip_storage,
|
|||
entry->m_last_seen= now;
|
||||
*connect_errors= entry->m_errors.m_connect;
|
||||
|
||||
if (entry->m_errors.m_connect >= max_connect_errors)
|
||||
if (unlikely(entry->m_errors.m_connect >= max_connect_errors))
|
||||
{
|
||||
entry->m_errors.m_host_blocked++;
|
||||
entry->set_error_timestamps(now);
65
sql/item.cc
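Many of the hunks in this commit wrap an assignment-plus-test such as if ((error= f())) into if (unlikely((error= f()))). The hint only annotates the truth value of the whole expression; the assignment is still evaluated on every call, so behaviour is unchanged and only the expected branch layout differs. A minimal sketch below, using the GCC/Clang builtin as above; read_row() and scan_all() are hypothetical helpers, not functions from the server source.

#include <cstdio>

#define unlikely(x) __builtin_expect(((x) != 0), 0)

/* Hypothetical helper: returns 0 on success, a non-zero error code on failure. */
static int read_row(int row) { return row >= 0 ? 0 : 1; }

static int scan_all(int rows)
{
  int error;
  for (int i= 0; i < rows; i++)
  {
    /* 'error' is assigned exactly as before; only the branch weight changes. */
    if (unlikely((error= read_row(i))))
      return error;
  }
  return 0;
}

int main()
{
  std::printf("scan_all: %d\n", scan_all(10));
  return 0;
}
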
|
@ -295,7 +295,7 @@ longlong Item::val_int_signed_typecast_from_str()
{
int error;
longlong value= val_int_from_str(&error);
if (!null_value && value < 0 && error == 0)
if (unlikely(!null_value && value < 0 && error == 0))
push_note_converted_to_negative_complement(current_thd);
return value;
}

@ -305,7 +305,7 @@ longlong Item::val_int_unsigned_typecast_from_str()
{
int error;
longlong value= val_int_from_str(&error);
if (!null_value && error < 0)
if (unlikely(!null_value && error < 0))
push_note_converted_to_positive_complement(current_thd);
return value;
}

@ -703,12 +703,11 @@ Item* Item::set_expr_cache(THD *thd)
|
|||
{
|
||||
DBUG_ENTER("Item::set_expr_cache");
|
||||
Item_cache_wrapper *wrapper;
|
||||
if ((wrapper= new (thd->mem_root) Item_cache_wrapper(thd, this)) &&
|
||||
!wrapper->fix_fields(thd, (Item**)&wrapper))
|
||||
if (likely((wrapper= new (thd->mem_root) Item_cache_wrapper(thd, this))) &&
|
||||
likely(!wrapper->fix_fields(thd, (Item**)&wrapper)))
|
||||
{
|
||||
if (wrapper->set_cache(thd))
|
||||
DBUG_RETURN(NULL);
|
||||
DBUG_RETURN(wrapper);
|
||||
if (likely(!wrapper->set_cache(thd)))
|
||||
DBUG_RETURN(wrapper);
|
||||
}
|
||||
DBUG_RETURN(NULL);
|
||||
}
|
||||
|
@ -1307,7 +1306,7 @@ Item *Item_cache::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
|
|||
return this;
|
||||
Item_cache *cache;
|
||||
if (!conv || conv->fix_fields(thd, (Item **) NULL) ||
|
||||
!(cache= new (thd->mem_root) Item_cache_str(thd, conv)))
|
||||
unlikely(!(cache= new (thd->mem_root) Item_cache_str(thd, conv))))
|
||||
return NULL; // Safe conversion is not possible, or OEM
|
||||
cache->setup(thd, conv);
|
||||
cache->fixed= false; // Make Item::fix_fields() happy
|
||||
|
@ -1389,7 +1388,7 @@ Item *Item::const_charset_converter(THD *thd, CHARSET_INFO *tocs,
|
|||
collation.derivation,
|
||||
collation.repertoire));
|
||||
|
||||
if (!conv || (conv_errors && lossless))
|
||||
if (unlikely(!conv || (conv_errors && lossless)))
|
||||
{
|
||||
/*
|
||||
Safe conversion is not possible (or EOM).
|
||||
|
@ -2760,13 +2759,13 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll,
|
|||
Item* Item_func_or_sum::build_clone(THD *thd)
|
||||
{
|
||||
Item_func_or_sum *copy= (Item_func_or_sum *) get_copy(thd);
|
||||
if (!copy)
|
||||
if (unlikely(!copy))
|
||||
return 0;
|
||||
if (arg_count > 2)
|
||||
{
|
||||
copy->args=
|
||||
(Item**) alloc_root(thd->mem_root, sizeof(Item*) * arg_count);
|
||||
if (!copy->args)
|
||||
if (unlikely(!copy->args))
|
||||
return 0;
|
||||
}
|
||||
else if (arg_count > 0)
|
||||
|
@ -2863,7 +2862,7 @@ Item_sp::sp_check_access(THD *thd)
|
|||
*/
|
||||
bool Item_sp::execute(THD *thd, bool *null_value, Item **args, uint arg_count)
|
||||
{
|
||||
if (execute_impl(thd, args, arg_count))
|
||||
if (unlikely(execute_impl(thd, args, arg_count)))
|
||||
{
|
||||
*null_value= 1;
|
||||
context->process_error(thd);
|
||||
|
@ -2905,7 +2904,7 @@ Item_sp::execute_impl(THD *thd, Item **args, uint arg_count)
|
|||
thd->security_ctx= context->security_ctx;
|
||||
}
|
||||
|
||||
if (sp_check_access(thd))
|
||||
if (unlikely(sp_check_access(thd)))
|
||||
{
|
||||
thd->security_ctx= save_security_ctx;
|
||||
DBUG_RETURN(TRUE);
|
||||
|
@ -2916,10 +2915,10 @@ Item_sp::execute_impl(THD *thd, Item **args, uint arg_count)
|
|||
statement-based replication (SBR) is active.
|
||||
*/
|
||||
|
||||
if (!m_sp->detistic() && !trust_function_creators &&
|
||||
(access == SP_CONTAINS_SQL || access == SP_MODIFIES_SQL_DATA) &&
|
||||
(mysql_bin_log.is_open() &&
|
||||
thd->variables.binlog_format == BINLOG_FORMAT_STMT))
|
||||
if (unlikely(!m_sp->detistic() && !trust_function_creators &&
|
||||
(access == SP_CONTAINS_SQL || access == SP_MODIFIES_SQL_DATA) &&
|
||||
(mysql_bin_log.is_open() &&
|
||||
thd->variables.binlog_format == BINLOG_FORMAT_STMT)))
|
||||
{
|
||||
my_error(ER_BINLOG_UNSAFE_ROUTINE, MYF(0));
|
||||
thd->security_ctx= save_security_ctx;
|
||||
|
@ -3049,9 +3048,10 @@ Item_sp::init_result_field(THD *thd, uint max_length, uint maybe_null,
|
|||
Item* Item_ref::build_clone(THD *thd)
|
||||
{
|
||||
Item_ref *copy= (Item_ref *) get_copy(thd);
|
||||
if (!copy ||
|
||||
!(copy->ref= (Item**) alloc_root(thd->mem_root, sizeof(Item*))) ||
|
||||
!(*copy->ref= (* ref)->build_clone(thd)))
|
||||
if (unlikely(!copy) ||
|
||||
unlikely(!(copy->ref= (Item**) alloc_root(thd->mem_root,
|
||||
sizeof(Item*)))) ||
|
||||
unlikely(!(*copy->ref= (* ref)->build_clone(thd))))
|
||||
return 0;
|
||||
return copy;
|
||||
}
|
||||
|
@ -4213,7 +4213,7 @@ bool Item_param::set_str(const char *str, ulong length,
|
|||
been written to the binary log.
|
||||
*/
|
||||
uint dummy_errors;
|
||||
if (value.m_string.copy(str, length, fromcs, tocs, &dummy_errors))
|
||||
if (unlikely(value.m_string.copy(str, length, fromcs, tocs, &dummy_errors)))
|
||||
DBUG_RETURN(TRUE);
|
||||
/*
|
||||
Set str_value_ptr to make sure it's in sync with str_value.
|
||||
|
@ -6152,7 +6152,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
|
|||
|
||||
Field *new_field= (*((Item_field**)res))->field;
|
||||
|
||||
if (new_field == NULL)
|
||||
if (unlikely(new_field == NULL))
|
||||
{
|
||||
/* The column to which we link isn't valid. */
|
||||
my_error(ER_BAD_FIELD_ERROR, MYF(0), (*res)->name.str,
|
||||
|
@ -6197,7 +6197,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
|
|||
}
|
||||
}
|
||||
|
||||
if (!select)
|
||||
if (unlikely(!select))
|
||||
{
|
||||
my_error(ER_BAD_FIELD_ERROR, MYF(0), full_name(), thd->where);
|
||||
goto error;
|
||||
|
@ -6605,7 +6605,7 @@ String *Item::check_well_formed_result(String *str, bool send_error)
|
|||
CHARSET_INFO *cs= str->charset();
|
||||
uint wlen= str->well_formed_length();
|
||||
null_value= false;
|
||||
if (wlen < str->length())
|
||||
if (unlikely(wlen < str->length()))
|
||||
{
|
||||
THD *thd= current_thd;
|
||||
char hexbuf[7];
|
||||
|
@ -6644,9 +6644,10 @@ String_copier_for_item::copy_with_warn(CHARSET_INFO *dstcs, String *dst,
|
|||
CHARSET_INFO *srccs, const char *src,
|
||||
uint32 src_length, uint32 nchars)
|
||||
{
|
||||
if ((dst->copy(dstcs, srccs, src, src_length, nchars, this)))
|
||||
if (unlikely((dst->copy(dstcs, srccs, src, src_length, nchars, this))))
|
||||
return true; // EOM
|
||||
if (const char *pos= well_formed_error_pos())
|
||||
const char *pos;
|
||||
if (unlikely(pos= well_formed_error_pos()))
|
||||
{
|
||||
ErrConvString err(pos, src_length - (pos - src), &my_charset_bin);
|
||||
push_warning_printf(m_thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
|
@ -6657,7 +6658,7 @@ String_copier_for_item::copy_with_warn(CHARSET_INFO *dstcs, String *dst,
|
|||
err.ptr());
|
||||
return false;
|
||||
}
|
||||
if (const char *pos= cannot_convert_error_pos())
|
||||
if (unlikely(pos= cannot_convert_error_pos()))
|
||||
{
|
||||
char buf[16];
|
||||
int mblen= my_charlen(srccs, pos, src + src_length);
|
||||
|
@ -7180,7 +7181,7 @@ Item_float::Item_float(THD *thd, const char *str_arg, size_t length):
|
|||
char *end_not_used;
|
||||
value= my_strntod(&my_charset_bin, (char*) str_arg, length, &end_not_used,
|
||||
&error);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
char tmp[NAME_LEN + 1];
|
||||
my_snprintf(tmp, sizeof(tmp), "%.*s", (int)length, str_arg);
|
||||
|
@ -7929,7 +7930,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
|
|||
Field *from_field;
|
||||
ref= 0;
|
||||
|
||||
if (!outer_context)
|
||||
if (unlikely(!outer_context))
|
||||
{
|
||||
/* The current reference cannot be resolved in this query. */
|
||||
my_error(ER_BAD_FIELD_ERROR,MYF(0),
|
||||
|
@ -8077,7 +8078,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
|
|||
last_checked_context->select_lex->nest_level);
|
||||
return FALSE;
|
||||
}
|
||||
if (ref == 0)
|
||||
if (unlikely(ref == 0))
|
||||
{
|
||||
/* The item was not a table field and not a reference */
|
||||
my_error(ER_BAD_FIELD_ERROR, MYF(0),
|
||||
|
@ -9501,7 +9502,7 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items)
|
|||
|
||||
if (arg->type() == REF_ITEM)
|
||||
arg= static_cast<Item_ref *>(arg)->ref[0];
|
||||
if (arg->type() != FIELD_ITEM)
|
||||
if (unlikely(arg->type() != FIELD_ITEM))
|
||||
{
|
||||
my_error(ER_BAD_FIELD_ERROR, MYF(0), "", "VALUES() function");
|
||||
return TRUE;
|
||||
|
@ -9650,7 +9651,7 @@ bool Item_trigger_field::fix_fields(THD *thd, Item **items)
|
|||
|
||||
/* Set field. */
|
||||
|
||||
if (field_idx != (uint)-1)
|
||||
if (likely(field_idx != (uint)-1))
|
||||
{
|
||||
#ifndef NO_EMBEDDED_ACCESS_CHECKS
|
||||
/*
|
||||
|
|
|
@ -2025,7 +2025,7 @@ template <class T>
|
|||
inline Item* get_item_copy (THD *thd, T* item)
|
||||
{
|
||||
Item *copy= new (get_thd_memroot(thd)) T(*item);
|
||||
if (copy)
|
||||
if (likely(copy))
|
||||
copy->register_in(thd);
|
||||
return copy;
|
||||
}
|
||||
|
@ -2162,7 +2162,7 @@ public:
|
|||
Item_args(THD *thd, Item *a, Item *b, Item *c)
|
||||
{
|
||||
arg_count= 0;
|
||||
if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 3)))
|
||||
if (likely((args= (Item**) thd_alloc(thd, sizeof(Item*) * 3))))
|
||||
{
|
||||
arg_count= 3;
|
||||
args[0]= a; args[1]= b; args[2]= c;
|
||||
|
@ -2171,7 +2171,7 @@ public:
|
|||
Item_args(THD *thd, Item *a, Item *b, Item *c, Item *d)
|
||||
{
|
||||
arg_count= 0;
|
||||
if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 4)))
|
||||
if (likely((args= (Item**) thd_alloc(thd, sizeof(Item*) * 4))))
|
||||
{
|
||||
arg_count= 4;
|
||||
args[0]= a; args[1]= b; args[2]= c; args[3]= d;
|
||||
|
@ -2180,7 +2180,7 @@ public:
|
|||
Item_args(THD *thd, Item *a, Item *b, Item *c, Item *d, Item* e)
|
||||
{
|
||||
arg_count= 5;
|
||||
if ((args= (Item**) thd_alloc(thd, sizeof(Item*) * 5)))
|
||||
if (likely((args= (Item**) thd_alloc(thd, sizeof(Item*) * 5))))
|
||||
{
|
||||
arg_count= 5;
|
||||
args[0]= a; args[1]= b; args[2]= c; args[3]= d; args[4]= e;
|
||||
|
|
|
@ -4302,7 +4302,7 @@ bool cmp_item_row::prepare_comparators(THD *thd, Item **args, uint arg_count)
|
|||
bool Item_func_in::fix_for_row_comparison_using_bisection(THD *thd)
|
||||
{
|
||||
uint cols= args[0]->cols();
|
||||
if (!(array= new (thd->mem_root) in_row(thd, arg_count-1, 0)))
|
||||
if (unlikely(!(array= new (thd->mem_root) in_row(thd, arg_count-1, 0))))
|
||||
return true;
|
||||
cmp_item_row *cmp= &((in_row*)array)->tmp;
|
||||
if (cmp->alloc_comparators(thd, cols) ||
|
||||
|
@ -4313,7 +4313,7 @@ bool Item_func_in::fix_for_row_comparison_using_bisection(THD *thd)
|
|||
Call store_value() to setup others.
|
||||
*/
|
||||
cmp->store_value(args[0]);
|
||||
if (thd->is_fatal_error) // OOM
|
||||
if (unlikely(thd->is_fatal_error)) // OOM
|
||||
return true;
|
||||
fix_in_vector();
|
||||
return false;
|
||||
|
@ -5437,7 +5437,7 @@ bool Regexp_processor_pcre::compile(String *pattern, bool send_error)
|
|||
m_pcre= pcre_compile(pattern->c_ptr_safe(), m_library_flags,
|
||||
&pcreErrorStr, &pcreErrorOffset, NULL);
|
||||
|
||||
if (m_pcre == NULL)
|
||||
if (unlikely(m_pcre == NULL))
|
||||
{
|
||||
if (send_error)
|
||||
{
|
||||
|
@ -5456,7 +5456,7 @@ bool Regexp_processor_pcre::compile(Item *item, bool send_error)
|
|||
char buff[MAX_FIELD_WIDTH];
|
||||
String tmp(buff, sizeof(buff), &my_charset_bin);
|
||||
String *pattern= item->val_str(&tmp);
|
||||
if (item->null_value || compile(pattern, send_error))
|
||||
if (unlikely(item->null_value) || (unlikely(compile(pattern, send_error))))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
@ -5575,7 +5575,7 @@ int Regexp_processor_pcre::pcre_exec_with_warn(const pcre *code,
|
|||
int rc= pcre_exec(code, extra, subject, length,
|
||||
startoffset, options, ovector, ovecsize);
|
||||
DBUG_EXECUTE_IF("pcre_exec_error_123", rc= -123;);
|
||||
if (rc < PCRE_ERROR_NOMATCH)
|
||||
if (unlikely(rc < PCRE_ERROR_NOMATCH))
|
||||
pcre_exec_warn(rc);
|
||||
return rc;
|
||||
}
|
||||
|
|
|
@ -3313,7 +3313,7 @@ Create_qfunc::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list)
|
|||
{
|
||||
LEX_CSTRING db;
|
||||
|
||||
if (! thd->db.str && ! thd->lex->sphead)
|
||||
if (unlikely(! thd->db.str && ! thd->lex->sphead))
|
||||
{
|
||||
/*
|
||||
The proper error message should be in the lines of:
|
||||
|
@ -3462,7 +3462,7 @@ Create_sp_func::create_with_db(THD *thd, LEX_CSTRING *db, LEX_CSTRING *name,
|
|||
const Sp_handler *sph= &sp_handler_function;
|
||||
Database_qualified_name pkgname(&null_clex_str, &null_clex_str);
|
||||
|
||||
if (has_named_parameters(item_list))
|
||||
if (unlikely(has_named_parameters(item_list)))
|
||||
{
|
||||
/*
|
||||
The syntax "db.foo(expr AS p1, expr AS p2, ...) is invalid,
|
||||
|
@ -3481,8 +3481,8 @@ Create_sp_func::create_with_db(THD *thd, LEX_CSTRING *db, LEX_CSTRING *name,
|
|||
arg_count= item_list->elements;
|
||||
|
||||
qname= new (thd->mem_root) sp_name(db, name, use_explicit_name);
|
||||
if (sph->sp_resolve_package_routine(thd, thd->lex->sphead,
|
||||
qname, &sph, &pkgname))
|
||||
if (unlikely(sph->sp_resolve_package_routine(thd, thd->lex->sphead,
|
||||
qname, &sph, &pkgname)))
|
||||
return NULL;
|
||||
sph->add_used_routine(lex, thd, qname);
|
||||
if (pkgname.m_name.length)
|
||||
|
@ -3502,7 +3502,7 @@ Create_sp_func::create_with_db(THD *thd, LEX_CSTRING *db, LEX_CSTRING *name,
|
|||
Item*
|
||||
Create_native_func::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list)
|
||||
{
|
||||
if (has_named_parameters(item_list))
|
||||
if (unlikely(has_named_parameters(item_list)))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3520,7 +3520,7 @@ Create_func_arg0::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count != 0)
|
||||
if (unlikely(arg_count != 0))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3538,7 +3538,7 @@ Create_func_arg1::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list
|
|||
if (item_list)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count != 1)
|
||||
if (unlikely(arg_count != 1))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3546,7 +3546,7 @@ Create_func_arg1::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list
|
|||
|
||||
Item *param_1= item_list->pop();
|
||||
|
||||
if (! param_1->is_autogenerated_name)
|
||||
if (unlikely(! param_1->is_autogenerated_name))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3564,7 +3564,7 @@ Create_func_arg2::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list
|
|||
if (item_list)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count != 2)
|
||||
if (unlikely(arg_count != 2))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3573,8 +3573,8 @@ Create_func_arg2::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list
|
|||
Item *param_1= item_list->pop();
|
||||
Item *param_2= item_list->pop();
|
||||
|
||||
if ( (! param_1->is_autogenerated_name)
|
||||
|| (! param_2->is_autogenerated_name))
|
||||
if (unlikely(!param_1->is_autogenerated_name ||
|
||||
!param_2->is_autogenerated_name))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3592,7 +3592,7 @@ Create_func_arg3::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list
|
|||
if (item_list)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count != 3)
|
||||
if (unlikely(arg_count != 3))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3602,9 +3602,9 @@ Create_func_arg3::create_func(THD *thd, LEX_CSTRING *name, List<Item> *item_list
|
|||
Item *param_2= item_list->pop();
|
||||
Item *param_3= item_list->pop();
|
||||
|
||||
if ( (! param_1->is_autogenerated_name)
|
||||
|| (! param_2->is_autogenerated_name)
|
||||
|| (! param_3->is_autogenerated_name))
|
||||
if (unlikely(!param_1->is_autogenerated_name ||
|
||||
!param_2->is_autogenerated_name ||
|
||||
!param_3->is_autogenerated_name))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMETERS_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3765,7 +3765,7 @@ Item*
|
|||
Create_func_binlog_gtid_pos::create_2_arg(THD *thd, Item *arg1, Item *arg2)
|
||||
{
|
||||
#ifdef HAVE_REPLICATION
|
||||
if (!mysql_bin_log.is_open())
|
||||
if (unlikely(!mysql_bin_log.is_open()))
|
||||
#endif
|
||||
{
|
||||
my_error(ER_NO_BINARY_LOGGING, MYF(0));
|
||||
|
@ -3903,7 +3903,7 @@ Create_func_concat::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 1)
|
||||
if (unlikely(arg_count < 1))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3926,7 +3926,7 @@ Create_func_concat_operator_oracle::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 1)
|
||||
if (unlikely(arg_count < 1))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3950,7 +3950,7 @@ Create_func_decode_oracle::create_native(THD *thd, LEX_CSTRING *name,
|
|||
List<Item> *item_list)
|
||||
{
|
||||
uint arg_count= item_list ? item_list->elements : 0;
|
||||
if (arg_count < 3)
|
||||
if (unlikely(arg_count < 3))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -3970,7 +3970,7 @@ Create_func_concat_ws::create_native(THD *thd, LEX_CSTRING *name,
|
|||
arg_count= item_list->elements;
|
||||
|
||||
/* "WS" stands for "With Separator": this function takes 2+ arguments */
|
||||
if (arg_count < 2)
|
||||
if (unlikely(arg_count < 2))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -4272,7 +4272,7 @@ Create_func_elt::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 2)
|
||||
if (unlikely(arg_count < 2))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -4468,7 +4468,7 @@ Create_func_field::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 2)
|
||||
if (unlikely(arg_count < 2))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -4841,7 +4841,7 @@ Create_func_greatest::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 2)
|
||||
if (unlikely(arg_count < 2))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -5135,7 +5135,7 @@ Create_func_json_detailed::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/)
|
||||
if (unlikely(arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5284,7 +5284,7 @@ Create_func_json_array_append::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
|
||||
if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5310,7 +5310,7 @@ Create_func_json_array_insert::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
|
||||
if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5336,7 +5336,7 @@ Create_func_json_insert::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
|
||||
if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5363,7 +5363,7 @@ Create_func_json_set::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
|
||||
if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5390,7 +5390,7 @@ Create_func_json_replace::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 3 || (arg_count & 1) == 0 /*is even*/)
|
||||
if (unlikely(arg_count < 3 || (arg_count & 1) == 0 /*is even*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5417,7 +5417,7 @@ Create_func_json_remove::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 2 /*json_doc, path [,path]*/)
|
||||
if (unlikely(arg_count < 2 /*json_doc, path [,path]*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5443,7 +5443,7 @@ Create_func_json_object::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
{
|
||||
arg_count= item_list->elements;
|
||||
if ((arg_count & 1) != 0 /*is odd*/)
|
||||
if (unlikely((arg_count & 1) != 0 /*is odd*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
func= NULL;
|
||||
|
@ -5473,8 +5473,8 @@ Create_func_json_length::create_native(THD *thd, LEX_CSTRING *name,
|
|||
Item *func;
|
||||
int arg_count;
|
||||
|
||||
if (item_list == NULL ||
|
||||
(arg_count= item_list->elements) == 0)
|
||||
if (unlikely(item_list == NULL ||
|
||||
(arg_count= item_list->elements) == 0))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
func= NULL;
|
||||
|
@ -5498,8 +5498,8 @@ Create_func_json_merge::create_native(THD *thd, LEX_CSTRING *name,
|
|||
Item *func;
|
||||
int arg_count;
|
||||
|
||||
if (item_list == NULL ||
|
||||
(arg_count= item_list->elements) < 2) // json, json
|
||||
if (unlikely(item_list == NULL ||
|
||||
(arg_count= item_list->elements) < 2)) // json, json
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
func= NULL;
|
||||
|
@ -5526,7 +5526,7 @@ Create_func_json_contains::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count == 2 || arg_count == 3/* json_doc, val, [path] */)
|
||||
if (unlikely(arg_count == 2 || arg_count == 3/* json_doc, val, [path] */))
|
||||
{
|
||||
func= new (thd->mem_root) Item_func_json_contains(thd, *item_list);
|
||||
}
|
||||
|
@ -5552,7 +5552,7 @@ Create_func_json_keys::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/)
|
||||
if (unlikely(arg_count < 1 || arg_count > 2 /* json_doc, [path]...*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5578,7 +5578,7 @@ Create_func_json_contains_path::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 3 /* json_doc, one_or_all, path, [path]...*/)
|
||||
if (unlikely(arg_count < 3 /* json_doc, one_or_all, path, [path]...*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5604,7 +5604,7 @@ Create_func_json_extract::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 2 /* json_doc, path, [path]...*/)
|
||||
if (unlikely(arg_count < 2 /* json_doc, path, [path]...*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5630,7 +5630,7 @@ Create_func_json_search::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 3 /* json_doc, one_or_all, search_str, [escape_char[, path]...*/)
|
||||
if (unlikely(arg_count < 3 /* json_doc, one_or_all, search_str, [escape_char[, path]...*/))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
}
|
||||
|
@ -5701,7 +5701,7 @@ Create_func_least::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 2)
|
||||
if (unlikely(arg_count < 2))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -5979,7 +5979,7 @@ Create_func_make_set::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 2)
|
||||
if (unlikely(arg_count < 2))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return NULL;
|
||||
|
@ -6004,7 +6004,7 @@ Create_func_master_pos_wait::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 2 || arg_count > 4)
|
||||
if (unlikely(arg_count < 2 || arg_count > 4))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return func;
|
||||
|
@ -6054,7 +6054,7 @@ Create_func_master_gtid_wait::create_native(THD *thd, LEX_CSTRING *name,
|
|||
if (item_list != NULL)
|
||||
arg_count= item_list->elements;
|
||||
|
||||
if (arg_count < 1 || arg_count > 2)
|
||||
if (unlikely(arg_count < 1 || arg_count > 2))
|
||||
{
|
||||
my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
|
||||
return func;
|
||||
|
@ -7487,7 +7487,7 @@ Item *create_temporal_literal(THD *thd,
|
|||
DBUG_ASSERT(0);
|
||||
}
|
||||
|
||||
if (item)
|
||||
if (likely(item))
|
||||
{
|
||||
if (status.warnings) // e.g. a note on nanosecond truncation
|
||||
{
|
||||
|
@ -7596,7 +7596,7 @@ Item *create_func_dyncol_get(THD *thd, Item *str, Item *num,
|
|||
{
|
||||
Item *res;
|
||||
|
||||
if (!(res= new (thd->mem_root) Item_dyncol_get(thd, str, num)))
|
||||
if (likely(!(res= new (thd->mem_root) Item_dyncol_get(thd, str, num))))
|
||||
return res; // Return NULL
|
||||
return handler->create_typecast_item(thd, res,
|
||||
Type_cast_attributes(c_len, c_dec, cs));
|
||||
|
|
|
@ -376,7 +376,7 @@ Item_func::fix_fields(THD *thd, Item **ref)
|
|||
if (check_arguments())
|
||||
return true;
|
||||
fix_length_and_dec();
|
||||
if (thd->is_error()) // An error inside fix_length_and_dec occurred
|
||||
if (unlikely(thd->is_error())) // An error inside fix_length_and_dec occurred
|
||||
return TRUE;
|
||||
fixed= 1;
|
||||
return FALSE;
|
||||
|
@ -1149,7 +1149,8 @@ double Item_double_typecast::val_real()
|
|||
if ((null_value= args[0]->null_value))
|
||||
return 0.0;
|
||||
|
||||
if ((error= truncate_double(&tmp, max_length, decimals, 0, DBL_MAX)))
|
||||
if (unlikely((error= truncate_double(&tmp, max_length, decimals, 0,
|
||||
DBL_MAX))))
|
||||
{
|
||||
THD *thd= current_thd;
|
||||
push_warning_printf(thd,
|
||||
|
@ -2741,7 +2742,7 @@ bool Item_func_min_max::get_date_native(MYSQL_TIME *ltime, ulonglong fuzzy_date)
|
|||
longlong res= args[i]->val_datetime_packed();
|
||||
|
||||
/* Check if we need to stop (because of error or KILL) and stop the loop */
|
||||
if (args[i]->null_value)
|
||||
if (unlikely(args[i]->null_value))
|
||||
return (null_value= 1);
|
||||
|
||||
if (i == 0 || (res < min_max ? cmp_sign : -cmp_sign) > 0)
|
||||
|
@ -2750,7 +2751,7 @@ bool Item_func_min_max::get_date_native(MYSQL_TIME *ltime, ulonglong fuzzy_date)
|
|||
unpack_time(min_max, ltime, mysql_timestamp_type());
|
||||
|
||||
if (!(fuzzy_date & TIME_TIME_ONLY) &&
|
||||
((null_value= check_date_with_warn(ltime, fuzzy_date,
|
||||
unlikely((null_value= check_date_with_warn(ltime, fuzzy_date,
|
||||
MYSQL_TIMESTAMP_ERROR))))
|
||||
return true;
|
||||
|
||||
|
@ -3379,7 +3380,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
|
|||
}
|
||||
}
|
||||
Udf_func_init init= u_d->func_init;
|
||||
if ((error=(uchar) init(&initid, &f_args, init_msg_buff)))
|
||||
if (unlikely((error=(uchar) init(&initid, &f_args, init_msg_buff))))
|
||||
{
|
||||
my_error(ER_CANT_INITIALIZE_UDF, MYF(0),
|
||||
u_d->name.str, init_msg_buff);
|
||||
|
@ -3397,7 +3398,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
|
|||
func->decimals=MY_MIN(initid.decimals,NOT_FIXED_DEC);
|
||||
}
|
||||
initialized=1;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
my_error(ER_CANT_INITIALIZE_UDF, MYF(0),
|
||||
u_d->name.str, ER_THD(thd, ER_UNKNOWN_ERROR));
|
||||
|
@ -3409,7 +3410,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
|
|||
|
||||
bool udf_handler::get_arguments()
|
||||
{
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
return 1; // Got an error earlier
|
||||
char *to= num_buffer;
|
||||
uint str_count=0;
|
||||
|
@ -3484,7 +3485,7 @@ String *udf_handler::val_str(String *str,String *save_str)
|
|||
char *res=func(&initid, &f_args, (char*) str->ptr(), &res_length,
|
||||
&is_null_tmp, &error);
|
||||
DBUG_PRINT("info", ("udf func returned, res_length: %lu", res_length));
|
||||
if (is_null_tmp || !res || error) // The !res is for safety
|
||||
if (is_null_tmp || !res || unlikely(error)) // The !res is for safety
|
||||
{
|
||||
DBUG_PRINT("info", ("Null or error"));
|
||||
DBUG_RETURN(0);
|
||||
|
@ -3520,7 +3521,7 @@ my_decimal *udf_handler::val_decimal(my_bool *null_value, my_decimal *dec_buf)
|
|||
u_d->func;
|
||||
|
||||
char *res= func(&initid, &f_args, buf, &res_length, &is_null, &error);
|
||||
if (is_null || error)
|
||||
if (is_null || unlikely(error))
|
||||
{
|
||||
*null_value= 1;
|
||||
return 0;
|
||||
|
@ -4081,7 +4082,7 @@ longlong Item_func_get_lock::val_int()
|
|||
thd->push_internal_handler(&lock_wait_timeout_handler);
|
||||
bool error= thd->mdl_context.acquire_lock(&ull_request, timeout);
|
||||
(void) thd->pop_internal_handler();
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (lock_wait_timeout_handler.m_lock_wait_timeout)
|
||||
null_value= 0;
|
||||
|
@ -5354,13 +5355,14 @@ get_var_with_binlog(THD *thd, enum_sql_command sql_command,
|
|||
new (thd->mem_root) Item_null(thd))),
|
||||
thd->mem_root);
|
||||
/* Create the variable if the above allocations succeeded */
|
||||
if (thd->is_fatal_error || sql_set_variables(thd, &tmp_var_list, false))
|
||||
if (unlikely(thd->is_fatal_error) ||
|
||||
unlikely(sql_set_variables(thd, &tmp_var_list, false)))
|
||||
{
|
||||
thd->lex= sav_lex;
|
||||
goto err;
|
||||
}
|
||||
thd->lex= sav_lex;
|
||||
if (!(var_entry= get_variable(&thd->user_vars, name, 0)))
|
||||
if (unlikely(!(var_entry= get_variable(&thd->user_vars, name, 0))))
|
||||
goto err;
|
||||
}
|
||||
else if (var_entry->used_query_id == thd->query_id ||
|
||||
|
@ -5389,8 +5391,8 @@ get_var_with_binlog(THD *thd, enum_sql_command sql_command,
|
|||
destroyed.
|
||||
*/
|
||||
size= ALIGN_SIZE(sizeof(BINLOG_USER_VAR_EVENT)) + var_entry->length;
|
||||
if (!(user_var_event= (BINLOG_USER_VAR_EVENT *)
|
||||
alloc_root(thd->user_var_events_alloc, size)))
|
||||
if (unlikely(!(user_var_event= (BINLOG_USER_VAR_EVENT *)
|
||||
alloc_root(thd->user_var_events_alloc, size))))
|
||||
goto err;
|
||||
|
||||
user_var_event->value= (char*) user_var_event +
|
||||
|
@ -5439,7 +5441,7 @@ void Item_func_get_user_var::fix_length_and_dec()
|
|||
'm_var_entry' is NULL only if there occurred an error during the call to
|
||||
get_var_with_binlog.
|
||||
*/
|
||||
if (!error && m_var_entry)
|
||||
if (likely(!error && m_var_entry))
|
||||
{
|
||||
unsigned_flag= m_var_entry->unsigned_flag;
|
||||
max_length= (uint32)m_var_entry->length;
|
||||
|
@ -5930,8 +5932,8 @@ bool Item_func_match::init_search(THD *thd, bool no_order)
|
|||
for (uint i= 1; i < arg_count; i++)
|
||||
fields.push_back(args[i]);
|
||||
concat_ws= new (thd->mem_root) Item_func_concat_ws(thd, fields);
|
||||
if (thd->is_fatal_error)
|
||||
DBUG_RETURN(1); // OOM
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
DBUG_RETURN(1); // OOM in new or push_back
|
||||
/*
|
||||
Above function used only to get value and do not need fix_fields for it:
|
||||
Item_string - basic constant
|
||||
|
@ -6806,7 +6808,7 @@ longlong Item_func_nextval::val_int()
|
|||
entry->value= value;
|
||||
entry->set_version(table);
|
||||
|
||||
if (error) // Warning already printed
|
||||
if (unlikely(error)) // Warning already printed
|
||||
entry->null_value= null_value= 1; // For not strict mode
|
||||
DBUG_RETURN(value);
|
||||
}
|
||||
|
@ -6918,7 +6920,7 @@ longlong Item_func_setval::val_int()
|
|||
DBUG_ASSERT(table && table->s->sequence);
|
||||
thd= table->in_use;
|
||||
|
||||
if (thd->count_cuted_fields == CHECK_FIELD_EXPRESSION)
|
||||
if (unlikely(thd->count_cuted_fields == CHECK_FIELD_EXPRESSION))
|
||||
{
|
||||
/* Alter table checking if function works */
|
||||
null_value= 0;
|
||||
|
@ -6927,7 +6929,7 @@ longlong Item_func_setval::val_int()
|
|||
|
||||
value= nextval;
|
||||
error= table->s->sequence->set_value(table, nextval, round, is_used);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
null_value= 1;
|
||||
value= 0;
|
||||
|
|
|
@ -149,13 +149,14 @@ longlong Item_func_inet_bool_base::val_int()
|
|||
{
|
||||
DBUG_ASSERT(fixed);
|
||||
|
||||
if (args[0]->result_type() != STRING_RESULT) // String argument expected
|
||||
// String argument expected
|
||||
if (unlikely(args[0]->result_type() != STRING_RESULT))
|
||||
return 0;
|
||||
|
||||
String buffer;
|
||||
String *arg_str= args[0]->val_str(&buffer);
|
||||
|
||||
if (!arg_str) // Out-of memory happened. The error has been reported.
|
||||
if (unlikely(!arg_str)) // Out-of memory happened. error has been reported.
|
||||
return 0; // Or: the underlying field is NULL
|
||||
|
||||
return calc_value(arg_str) ? 1 : 0;
|
||||
|
@ -175,7 +176,8 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer)
|
|||
{
|
||||
DBUG_ASSERT(fixed);
|
||||
|
||||
if (args[0]->result_type() != STRING_RESULT) // String argument expected
|
||||
// String argument expected
|
||||
if (unlikely(args[0]->result_type() != STRING_RESULT))
|
||||
{
|
||||
null_value= true;
|
||||
return NULL;
|
||||
|
@ -183,15 +185,17 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer)
|
|||
|
||||
StringBuffer<STRING_BUFFER_USUAL_SIZE> tmp;
|
||||
String *arg_str= args[0]->val_str(&tmp);
|
||||
if (!arg_str) // Out-of memory happened. The error has been reported.
|
||||
{ // Or: the underlying field is NULL
|
||||
if (unlikely(!arg_str))
|
||||
{
|
||||
// Out-of memory happened. error has been reported.
|
||||
// Or: the underlying field is NULL
|
||||
null_value= true;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
null_value= !calc_value(arg_str, buffer);
|
||||
|
||||
return null_value ? NULL : buffer;
|
||||
return unlikely(null_value) ? NULL : buffer;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
|
|
|
@ -173,7 +173,7 @@ static int json_nice(json_engine_t *je, String *nice_js,
key_end= je->s.c_str;
} while (json_read_keyname_chr(je) == 0);

if (je->s.error)
if (unlikely(je->s.error))
goto error;

if (!first_value)

@ -492,7 +492,7 @@ continue_search:
|
|||
if (json_read_value(&je))
|
||||
goto err_return;
|
||||
|
||||
if (check_and_get_value(&je, str, &error))
|
||||
if (unlikely(check_and_get_value(&je, str, &error)))
|
||||
{
|
||||
if (error)
|
||||
goto err_return;
|
||||
|
@ -623,7 +623,7 @@ String *Item_func_json_unquote::val_str(String *str)
|
|||
if (!(js= read_json(&je)))
|
||||
return NULL;
|
||||
|
||||
if (je.s.error || je.value_type != JSON_VALUE_STRING)
|
||||
if (unlikely(je.s.error) || je.value_type != JSON_VALUE_STRING)
|
||||
return js;
|
||||
|
||||
str->length(0);
|
||||
|
@ -835,7 +835,7 @@ String *Item_func_json_extract::read_json(String *str,
|
|||
}
|
||||
}
|
||||
|
||||
if (je.s.error)
|
||||
if (unlikely(je.s.error))
|
||||
goto error;
|
||||
|
||||
if (!not_first_value)
|
||||
|
@ -994,7 +994,7 @@ static int check_contains(json_engine_t *js, json_engine_t *value)
|
|||
k_end= value->s.c_str;
|
||||
} while (json_read_keyname_chr(value) == 0);
|
||||
|
||||
if (value->s.error || json_read_value(value))
|
||||
if (unlikely(value->s.error) || json_read_value(value))
|
||||
return FALSE;
|
||||
|
||||
if (set_js)
|
||||
|
@ -1037,7 +1037,7 @@ static int check_contains(json_engine_t *js, json_engine_t *value)
|
|||
return FALSE;
|
||||
return TRUE;
|
||||
}
|
||||
if (value->s.error || js->s.error ||
|
||||
if (unlikely(value->s.error) || unlikely(js->s.error) ||
|
||||
(!v_scalar && json_skip_to_level(js, c_level)))
|
||||
return FALSE;
|
||||
}
|
||||
|
@ -1165,7 +1165,7 @@ longlong Item_func_json_contains::val_int()
|
|||
goto error;
|
||||
|
||||
result= check_contains(&je, &ve);
|
||||
if (je.s.error || ve.s.error)
|
||||
if (unlikely(je.s.error || ve.s.error))
|
||||
goto error;
|
||||
|
||||
return result;
|
||||
|
@ -1385,7 +1385,7 @@ longlong Item_func_json_contains_path::val_int()
|
|||
}
|
||||
}
|
||||
|
||||
if (je.s.error == 0)
|
||||
if (likely(je.s.error == 0))
|
||||
return result;
|
||||
|
||||
report_json_error(js, &je, 0);
|
||||
|
@ -1749,7 +1749,7 @@ String *Item_func_json_array_insert::val_str(String *str)
|
|||
goto js_error;
|
||||
}
|
||||
|
||||
if (je.s.error)
|
||||
if (unlikely(je.s.error))
|
||||
goto js_error;
|
||||
|
||||
str->length(0);
|
||||
|
@ -1881,7 +1881,7 @@ static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2)
|
|||
key_end= je1->s.c_str;
|
||||
} while (json_read_keyname_chr(je1) == 0);
|
||||
|
||||
if (je1->s.error)
|
||||
if (unlikely(je1->s.error))
|
||||
return 1;
|
||||
|
||||
if (first_key)
|
||||
|
@ -1916,7 +1916,7 @@ static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2)
|
|||
return ires;
|
||||
goto merged_j1;
|
||||
}
|
||||
if (je2->s.error)
|
||||
if (unlikely(je2->s.error))
|
||||
return 2;
|
||||
|
||||
key_start= je1->s.c_str;
|
||||
|
@ -1946,7 +1946,7 @@ merged_j1:
|
|||
key_end= je2->s.c_str;
|
||||
} while (json_read_keyname_chr(je2) == 0);
|
||||
|
||||
if (je2->s.error)
|
||||
if (unlikely(je2->s.error))
|
||||
return 1;
|
||||
|
||||
*je1= sav_je1;
|
||||
|
@ -1957,7 +1957,7 @@ merged_j1:
|
|||
json_string_set_str(&key_name, key_start, key_end);
|
||||
if (!json_key_matches(je1, &key_name))
|
||||
{
|
||||
if (je1->s.error || json_skip_key(je1))
|
||||
if (unlikely(je1->s.error || json_skip_key(je1)))
|
||||
return 2;
|
||||
continue;
|
||||
}
|
||||
|
@ -1967,7 +1967,7 @@ merged_j1:
|
|||
goto continue_j2;
|
||||
}
|
||||
|
||||
if (je1->s.error)
|
||||
if (unlikely(je1->s.error))
|
||||
return 2;
|
||||
|
||||
if (first_key)
|
||||
|
@ -2008,7 +2008,7 @@ continue_j2:
|
|||
empty_array= 0;
|
||||
}
|
||||
|
||||
if (je1->s.error)
|
||||
if (unlikely(je1->s.error))
|
||||
return 1;
|
||||
|
||||
end1= je1->s.c_str - je1->sav_c_len;
|
||||
|
@ -2206,7 +2206,7 @@ longlong Item_func_json_length::val_int()
|
|||
while (json_scan_next(&je) == 0) {}
|
||||
}
|
||||
|
||||
if (!je.s.error)
|
||||
if (likely(!je.s.error))
|
||||
return length;
|
||||
|
||||
err_return:
|
||||
|
@ -2260,7 +2260,7 @@ longlong Item_func_json_depth::val_int()
|
|||
}
|
||||
} while (json_scan_next(&je) == 0);
|
||||
|
||||
if (!je.s.error)
|
||||
if (likely(!je.s.error))
|
||||
return depth;
|
||||
|
||||
report_json_error(js, &je, 0);
|
||||
|
@ -2475,7 +2475,7 @@ String *Item_func_json_insert::val_str(String *str)
|
|||
}
|
||||
}
|
||||
|
||||
if (je.s.error)
|
||||
if (unlikely(je.s.error))
|
||||
goto js_error;
|
||||
|
||||
if (!mode_insert)
|
||||
|
@ -2513,7 +2513,7 @@ String *Item_func_json_insert::val_str(String *str)
|
|||
}
|
||||
}
|
||||
|
||||
if (je.s.error)
|
||||
if (unlikely(je.s.error))
|
||||
goto js_error;
|
||||
|
||||
if (!mode_insert)
|
||||
|
@ -2686,7 +2686,7 @@ String *Item_func_json_remove::val_str(String *str)
|
|||
}
|
||||
}
|
||||
|
||||
if (je.s.error)
|
||||
if (unlikely(je.s.error))
|
||||
goto js_error;
|
||||
|
||||
continue;
|
||||
|
@ -2718,7 +2718,7 @@ String *Item_func_json_remove::val_str(String *str)
|
|||
}
|
||||
}
|
||||
|
||||
if (je.s.error)
|
||||
if (unlikely(je.s.error))
|
||||
goto js_error;
|
||||
|
||||
continue;
|
||||
|
@ -2883,7 +2883,7 @@ skip_search:
|
|||
{
|
||||
key_end= je.s.c_str;
|
||||
} while (json_read_keyname_chr(&je) == 0);
|
||||
if (je.s.error)
|
||||
if (unlikely(je.s.error))
|
||||
goto err_return;
|
||||
key_len= (int)(key_end - key_start);
|
||||
|
||||
|
@ -2907,7 +2907,7 @@ skip_search:
|
|||
}
|
||||
}
|
||||
|
||||
if (je.s.error || str->append("]", 1))
|
||||
if (unlikely(je.s.error || str->append("]", 1)))
|
||||
goto err_return;
|
||||
|
||||
null_value= 0;
|
||||
|
@ -3091,7 +3091,7 @@ String *Item_func_json_search::val_str(String *str)
|
|||
}
|
||||
}
|
||||
|
||||
if (je.s.error)
|
||||
if (unlikely(je.s.error))
|
||||
goto js_error;
|
||||
|
||||
end:
|
||||
|
|
|
@ -4918,7 +4918,7 @@ longlong Item_dyncol_get::val_int()
|
|||
char *end= val.x.string.value.str + val.x.string.value.length, *org_end= end;
|
||||
|
||||
num= my_strtoll10(val.x.string.value.str, &end, &error);
|
||||
if (end != org_end || error > 0)
|
||||
if (unlikely(end != org_end || error > 0))
|
||||
{
|
||||
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
ER_BAD_DATA,
|
||||
|
|
|
@ -710,7 +710,7 @@ bool Item_subselect::exec()
|
|||
Do not execute subselect in case of a fatal error
|
||||
or if the query has been killed.
|
||||
*/
|
||||
if (thd->is_error() || thd->killed)
|
||||
if (unlikely(thd->is_error() || thd->killed))
|
||||
DBUG_RETURN(true);
|
||||
|
||||
DBUG_ASSERT(!thd->lex->context_analysis_only);
|
||||
|
@ -1417,14 +1417,14 @@ void Item_exists_subselect::print(String *str, enum_query_type query_type)
|
|||
|
||||
bool Item_in_subselect::test_limit(st_select_lex_unit *unit_arg)
|
||||
{
|
||||
if (unit_arg->fake_select_lex &&
|
||||
unit_arg->fake_select_lex->test_limit())
|
||||
if (unlikely(unit_arg->fake_select_lex &&
|
||||
unit_arg->fake_select_lex->test_limit()))
|
||||
return(1);
|
||||
|
||||
SELECT_LEX *sl= unit_arg->first_select();
|
||||
for (; sl; sl= sl->next_select())
|
||||
{
|
||||
if (sl->test_limit())
|
||||
if (unlikely(sl->test_limit()))
|
||||
return(1);
|
||||
}
|
||||
return(0);
|
||||
|
@ -3935,7 +3935,8 @@ int subselect_uniquesubquery_engine::scan_table()
|
|||
for (;;)
|
||||
{
|
||||
error=table->file->ha_rnd_next(table->record[0]);
|
||||
if (error) {
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
{
|
||||
error= 0;
|
||||
|
@ -4076,8 +4077,8 @@ int subselect_uniquesubquery_engine::exec()
|
|||
make_prev_keypart_map(tab->
|
||||
ref.key_parts),
|
||||
HA_READ_KEY_EXACT);
|
||||
if (error &&
|
||||
error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
|
||||
if (unlikely(error &&
|
||||
error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE))
|
||||
error= report_error(table, error);
|
||||
else
|
||||
{
|
||||
|
@ -4115,7 +4116,8 @@ int subselect_uniquesubquery_engine::index_lookup()
|
|||
HA_READ_KEY_EXACT);
|
||||
DBUG_PRINT("info", ("lookup result: %i", error));
|
||||
|
||||
if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
|
||||
if (unlikely(error && error != HA_ERR_KEY_NOT_FOUND &&
|
||||
error != HA_ERR_END_OF_FILE))
|
||||
{
|
||||
/*
|
||||
TIMOUR: I don't understand at all when do we need to call report_error.
|
||||
|
@ -4246,8 +4248,8 @@ int subselect_indexsubquery_engine::exec()
|
|||
make_prev_keypart_map(tab->
|
||||
ref.key_parts),
|
||||
HA_READ_KEY_EXACT);
|
||||
if (error &&
|
||||
error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
|
||||
if (unlikely(error &&
|
||||
error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE))
|
||||
error= report_error(table, error);
|
||||
else
|
||||
{
|
||||
|
@ -4269,7 +4271,7 @@ int subselect_indexsubquery_engine::exec()
|
|||
error= table->file->ha_index_next_same(table->record[0],
|
||||
tab->ref.key_buff,
|
||||
tab->ref.key_length);
|
||||
if (error && error != HA_ERR_END_OF_FILE)
|
||||
if (unlikely(error && error != HA_ERR_END_OF_FILE))
|
||||
{
|
||||
error= report_error(table, error);
|
||||
break;
|
||||
|
@ -4282,7 +4284,7 @@ int subselect_indexsubquery_engine::exec()
|
|||
*tab->ref.null_ref_key= 1;
|
||||
null_finding= 1;
|
||||
/* Check if there exists a row with a null value in the index */
|
||||
if ((error= (safe_index_read(tab) == 1)))
|
||||
if (unlikely((error= (safe_index_read(tab) == 1))))
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -5425,8 +5427,8 @@ int subselect_hash_sj_engine::exec()
|
|||
DBUG_ASSERT(materialize_join->optimization_state == JOIN::OPTIMIZATION_DONE &&
|
||||
!is_materialized);
|
||||
materialize_join->exec();
|
||||
if ((res= MY_TEST(materialize_join->error || thd->is_fatal_error ||
|
||||
thd->is_error())))
|
||||
if (unlikely((res= MY_TEST(materialize_join->error || thd->is_fatal_error ||
|
||||
thd->is_error()))))
|
||||
goto err;
|
||||
|
||||
/*
|
||||
|
@ -5784,14 +5786,14 @@ Ordered_key::cmp_keys_by_row_data(ha_rows a, ha_rows b)
|
|||
rowid_a= row_num_to_rowid + a * rowid_length;
|
||||
rowid_b= row_num_to_rowid + b * rowid_length;
|
||||
/* Fetch the rows for comparison. */
|
||||
if ((error= tbl->file->ha_rnd_pos(tbl->record[0], rowid_a)))
|
||||
if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[0], rowid_a))))
|
||||
{
|
||||
/* purecov: begin inspected */
|
||||
tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error
|
||||
return 0;
|
||||
/* purecov: end */
|
||||
}
|
||||
if ((error= tbl->file->ha_rnd_pos(tbl->record[1], rowid_b)))
|
||||
if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[1], rowid_b))))
|
||||
{
|
||||
/* purecov: begin inspected */
|
||||
tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error
|
||||
|
@ -5873,7 +5875,7 @@ int Ordered_key::cmp_key_with_search_key(rownum_t row_num)
|
|||
int __attribute__((unused)) error;
|
||||
int cmp_res;
|
||||
|
||||
if ((error= tbl->file->ha_rnd_pos(tbl->record[0], cur_rowid)))
|
||||
if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[0], cur_rowid))))
|
||||
{
|
||||
/* purecov: begin inspected */
|
||||
tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error
|
||||
|
@ -6222,7 +6224,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
|
|||
DBUG_ASSERT(cur_keyid == merge_keys_count);
|
||||
|
||||
/* Populate the indexes with data from the temporary table. */
|
||||
if (tmp_table->file->ha_rnd_init_with_error(1))
|
||||
if (unlikely(tmp_table->file->ha_rnd_init_with_error(1)))
|
||||
return TRUE;
|
||||
tmp_table->file->extra_opt(HA_EXTRA_CACHE,
|
||||
current_thd->variables.read_buff_size);
|
||||
|
@ -6230,7 +6232,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
|
|||
while (TRUE)
|
||||
{
|
||||
error= tmp_table->file->ha_rnd_next(tmp_table->record[0]);
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
if (unlikely(error == HA_ERR_RECORD_DELETED))
|
||||
{
|
||||
/* We get this for duplicate records that should not be in tmp_table. */
|
||||
continue;
|
||||
|
@ -6240,7 +6242,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
|
|||
cause to stop the iteration than EOF.
|
||||
*/
|
||||
DBUG_ASSERT(!error || error == HA_ERR_END_OF_FILE);
|
||||
if (error == HA_ERR_END_OF_FILE)
|
||||
if (unlikely(error == HA_ERR_END_OF_FILE))
|
||||
{
|
||||
DBUG_ASSERT(cur_rownum == tmp_table->file->stats.records);
|
||||
break;
|
||||
|
@ -6460,7 +6462,7 @@ bool subselect_rowid_merge_engine::partial_match()
|
|||
DBUG_ASSERT(!pq.elements);
|
||||
|
||||
/* All data accesses during execution are via handler::ha_rnd_pos() */
|
||||
if (tmp_table->file->ha_rnd_init_with_error(0))
|
||||
if (unlikely(tmp_table->file->ha_rnd_init_with_error(0)))
|
||||
{
|
||||
res= FALSE;
|
||||
goto end;
|
||||
|
@ -6666,7 +6668,7 @@ bool subselect_table_scan_engine::partial_match()
|
|||
int error;
|
||||
bool res;
|
||||
|
||||
if (tmp_table->file->ha_rnd_init_with_error(1))
|
||||
if (unlikely(tmp_table->file->ha_rnd_init_with_error(1)))
|
||||
{
|
||||
res= FALSE;
|
||||
goto end;
|
||||
|
@ -6677,7 +6679,8 @@ bool subselect_table_scan_engine::partial_match()
|
|||
for (;;)
|
||||
{
|
||||
error= tmp_table->file->ha_rnd_next(tmp_table->record[0]);
|
||||
if (error) {
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
{
|
||||
error= 0;
|
||||
|
|
|
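The subselect-engine hunks above share one shape: a storage-engine call sits in a loop and its non-zero return is wrapped in unlikely(), because end-of-file, deleted rows and real failures are the cold path. A contrived, self-contained illustration of that idiom follows; the reader function and error codes are invented for the example and are not taken from the server sources.

#define unlikely(x)  __builtin_expect(((x) != 0), 0)

enum { EX_OK= 0, EX_DELETED= 134, EX_EOF= 137 };   /* invented error codes */

static int fake_rows_left= 3;
static int read_next_row(void)                     /* stub data source */
{
  return fake_rows_left-- > 0 ? EX_OK : EX_EOF;
}

static int scan_all_rows(void)
{
  int error;
  for (;;)
  {
    error= read_next_row();
    if (unlikely(error))                           /* cold path: EOF, deleted row or failure */
    {
      if (error == EX_DELETED)
      {
        error= 0;                                  /* skip rows that no longer exist */
        continue;
      }
      break;                                       /* EOF or a real error ends the scan */
    }
    /* ... process the fetched row here (hot path) ... */
  }
  return error == EX_EOF ? 0 : error;
}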
@ -999,7 +999,7 @@ bool Aggregator_distinct::add()
|
|||
*/
|
||||
return tree->unique_add(table->record[0] + table->s->null_bytes);
|
||||
}
|
||||
if ((error= table->file->ha_write_tmp_row(table->record[0])) &&
|
||||
if (unlikely((error= table->file->ha_write_tmp_row(table->record[0]))) &&
|
||||
table->file->is_fatal_error(error, HA_CHECK_DUP))
|
||||
return TRUE;
|
||||
return FALSE;
|
||||
|
|
|
@ -257,8 +257,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
|
|||
break;
|
||||
case 'w':
|
||||
tmp= (char*) val + 1;
|
||||
if ((weekday= (int) my_strtoll10(val, &tmp, &error)) < 0 ||
|
||||
weekday >= 7)
|
||||
if (unlikely((weekday= (int) my_strtoll10(val, &tmp, &error)) < 0 ||
|
||||
weekday >= 7))
|
||||
goto err;
|
||||
/* We should use the same 1 - 7 scale for %w as for %W */
|
||||
if (!weekday)
|
||||
|
@ -279,9 +279,10 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
|
|||
sunday_first_n_first_week_non_iso= (*ptr=='U' || *ptr== 'V');
|
||||
strict_week_number= (*ptr=='V' || *ptr=='v');
|
||||
tmp= (char*) val + MY_MIN(val_len, 2);
|
||||
if ((week_number= (int) my_strtoll10(val, &tmp, &error)) < 0 ||
|
||||
(strict_week_number && !week_number) ||
|
||||
week_number > 53)
|
||||
if (unlikely((week_number=
|
||||
(int) my_strtoll10(val, &tmp, &error)) < 0 ||
|
||||
(strict_week_number && !week_number) ||
|
||||
week_number > 53))
|
||||
goto err;
|
||||
val= tmp;
|
||||
break;
|
||||
|
@ -331,7 +332,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
|
|||
default:
|
||||
goto err;
|
||||
}
|
||||
if (error) // Error from my_strtoll10
|
||||
if (unlikely(error)) // Error from my_strtoll10
|
||||
goto err;
|
||||
}
|
||||
else if (!my_isspace(cs, *ptr))
|
||||
|
|
|
@ -393,7 +393,7 @@ static int lock_external(THD *thd, TABLE **tables, uint count)
|
|||
(*tables)->reginfo.lock_type <= TL_READ_NO_INSERT))
|
||||
lock_type=F_RDLCK;
|
||||
|
||||
if ((error=(*tables)->file->ha_external_lock(thd,lock_type)))
|
||||
if (unlikely((error=(*tables)->file->ha_external_lock(thd,lock_type))))
|
||||
{
|
||||
(*tables)->file->print_error(error, MYF(0));
|
||||
while (--i)
|
||||
|
@ -439,7 +439,7 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock, bool free_lock)
|
|||
DBUG_ASSERT(!(sql_lock->flags & GET_LOCK_ON_THD));
|
||||
my_free(sql_lock);
|
||||
}
|
||||
if (!errors)
|
||||
if (likely(!errors))
|
||||
thd->clear_error();
|
||||
THD_STAGE_INFO(thd, org_stage);
|
||||
DBUG_VOID_RETURN;
|
||||
|
@ -726,7 +726,7 @@ static int unlock_external(THD *thd, TABLE **table,uint count)
|
|||
if ((*table)->current_lock != F_UNLCK)
|
||||
{
|
||||
(*table)->current_lock = F_UNLCK;
|
||||
if ((error=(*table)->file->ha_external_lock(thd, F_UNLCK)))
|
||||
if (unlikely((error=(*table)->file->ha_external_lock(thd, F_UNLCK))))
|
||||
{
|
||||
error_code= error;
|
||||
(*table)->file->print_error(error, MYF(0));
|
||||
|
|
135  sql/log.cc

@@ -2044,7 +2044,7 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all)
    - We are in a transaction and a full transaction is committed.
    Otherwise, we accumulate the changes.
  */
  if (!error && ending_trans(thd, all))
  if (likely(!error) && ending_trans(thd, all))
    error= binlog_commit_flush_trx_cache(thd, all, cache_mngr);

  /*
|
||||
|
@ -2122,7 +2122,7 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all)
|
|||
*/
|
||||
error |= binlog_truncate_trx_cache(thd, cache_mngr, all);
|
||||
}
|
||||
else if (!error)
|
||||
else if (likely(!error))
|
||||
{
|
||||
if (ending_trans(thd, all) && trans_cannot_safely_rollback(thd, all))
|
||||
error= binlog_rollback_flush_trx_cache(thd, all, cache_mngr);
|
||||
|
@ -2174,7 +2174,7 @@ void MYSQL_BIN_LOG::set_write_error(THD *thd, bool is_transactional)
|
|||
|
||||
write_error= 1;
|
||||
|
||||
if (check_write_error(thd))
|
||||
if (unlikely(check_write_error(thd)))
|
||||
DBUG_VOID_RETURN;
|
||||
|
||||
if (my_errno == EFBIG)
|
||||
|
@ -2202,7 +2202,7 @@ bool MYSQL_BIN_LOG::check_write_error(THD *thd)
|
|||
|
||||
bool checked= FALSE;
|
||||
|
||||
if (!thd->is_error())
|
||||
if (likely(!thd->is_error()))
|
||||
DBUG_RETURN(checked);
|
||||
|
||||
switch (thd->get_stmt_da()->sql_errno())
|
||||
|
@ -2273,7 +2273,7 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv)
|
|||
or "RELEASE S" without the preceding "SAVEPOINT S" in the binary
|
||||
log.
|
||||
*/
|
||||
if (!(error= mysql_bin_log.write(&qinfo)))
|
||||
if (likely(!(error= mysql_bin_log.write(&qinfo))))
|
||||
binlog_trans_log_savepos(thd, (my_off_t*) sv);
|
||||
|
||||
DBUG_RETURN(error);
|
||||
|
@ -2474,7 +2474,8 @@ static int find_uniq_filename(char *name, ulong next_log_number)
|
|||
length= (size_t) (end - start + 1);
|
||||
|
||||
if ((DBUG_EVALUATE_IF("error_unique_log_filename", 1,
|
||||
!(dir_info= my_dir(buff,MYF(MY_DONT_SORT))))))
|
||||
unlikely(!(dir_info= my_dir(buff,
|
||||
MYF(MY_DONT_SORT)))))))
|
||||
{ // This shouldn't happen
|
||||
strmov(end,".1"); // use name+1
|
||||
DBUG_RETURN(1);
|
||||
|
@ -2784,10 +2785,10 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name,
|
|||
if (!fn_ext(log_name)[0])
|
||||
{
|
||||
if (DBUG_EVALUATE_IF("binlog_inject_new_name_error", TRUE, FALSE) ||
|
||||
find_uniq_filename(new_name, next_log_number))
|
||||
unlikely(find_uniq_filename(new_name, next_log_number)))
|
||||
{
|
||||
THD *thd= current_thd;
|
||||
if (thd)
|
||||
if (unlikely(thd))
|
||||
my_error(ER_NO_UNIQUE_LOGFILE, MYF(ME_FATALERROR), log_name);
|
||||
sql_print_error(ER_DEFAULT(ER_NO_UNIQUE_LOGFILE), log_name);
|
||||
return 1;
|
||||
|
@ -3136,7 +3137,7 @@ end:
|
|||
|
||||
err:
|
||||
error= 1;
|
||||
if (! write_error)
|
||||
if (!write_error)
|
||||
{
|
||||
write_error= 1;
|
||||
sql_print_error(ER_THD(thd, ER_ERROR_ON_WRITE), name, errno);
|
||||
|
@ -4192,7 +4193,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log,
|
|||
|
||||
for (;;)
|
||||
{
|
||||
if ((error= my_delete(linfo.log_file_name, MYF(0))) != 0)
|
||||
if (unlikely((error= my_delete(linfo.log_file_name, MYF(0)))))
|
||||
{
|
||||
if (my_errno == ENOENT)
|
||||
{
|
||||
|
@ -4235,7 +4236,8 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log,
|
|||
|
||||
/* Start logging with a new file */
|
||||
close(LOG_CLOSE_INDEX | LOG_CLOSE_TO_BE_OPENED);
|
||||
if ((error= my_delete(index_file_name, MYF(0)))) // Reset (open will update)
|
||||
// Reset (open will update)
|
||||
if (unlikely((error= my_delete(index_file_name, MYF(0)))))
|
||||
{
|
||||
if (my_errno == ENOENT)
|
||||
{
|
||||
|
@ -4264,8 +4266,8 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log,
|
|||
}
|
||||
}
|
||||
if (create_new_log && !open_index_file(index_file_name, 0, FALSE))
|
||||
if ((error= open(save_name, log_type, 0, next_log_number,
|
||||
io_cache_type, max_size, 0, FALSE)))
|
||||
if (unlikely((error= open(save_name, log_type, 0, next_log_number,
|
||||
io_cache_type, max_size, 0, FALSE))))
|
||||
goto err;
|
||||
my_free((void *) save_name);
|
||||
|
||||
|
@ -4413,8 +4415,9 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
|
|||
Read the next log file name from the index file and pass it back to
|
||||
the caller.
|
||||
*/
|
||||
if((error=find_log_pos(&rli->linfo, rli->event_relay_log_name, 0)) ||
|
||||
(error=find_next_log(&rli->linfo, 0)))
|
||||
if (unlikely((error=find_log_pos(&rli->linfo, rli->event_relay_log_name,
|
||||
0))) ||
|
||||
unlikely((error=find_next_log(&rli->linfo, 0))))
|
||||
{
|
||||
sql_print_error("next log error: %d offset: %llu log: %s included: %d",
|
||||
error, rli->linfo.index_file_offset,
|
||||
|
@ -4529,14 +4532,14 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
|
|||
|
||||
if (need_mutex)
|
||||
mysql_mutex_lock(&LOCK_index);
|
||||
if ((error=find_log_pos(&log_info, to_log, 0 /*no mutex*/)))
|
||||
if (unlikely((error=find_log_pos(&log_info, to_log, 0 /*no mutex*/))) )
|
||||
{
|
||||
sql_print_error("MYSQL_BIN_LOG::purge_logs was called with file %s not "
|
||||
"listed in the index.", to_log);
|
||||
goto err;
|
||||
}
|
||||
|
||||
if ((error= open_purge_index_file(TRUE)))
|
||||
if (unlikely((error= open_purge_index_file(TRUE))))
|
||||
{
|
||||
sql_print_error("MYSQL_BIN_LOG::purge_logs failed to sync the index file.");
|
||||
goto err;
|
||||
|
@ -4546,12 +4549,12 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
|
|||
File name exists in index file; delete until we find this file
|
||||
or a file that is used.
|
||||
*/
|
||||
if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/)))
|
||||
if (unlikely((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/))))
|
||||
goto err;
|
||||
while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) &&
|
||||
can_purge_log(log_info.log_file_name))
|
||||
{
|
||||
if ((error= register_purge_index_entry(log_info.log_file_name)))
|
||||
if (unlikely((error= register_purge_index_entry(log_info.log_file_name))))
|
||||
{
|
||||
sql_print_error("MYSQL_BIN_LOG::purge_logs failed to copy %s to register file.",
|
||||
log_info.log_file_name);
|
||||
|
@ -4564,14 +4567,14 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
|
|||
|
||||
DBUG_EXECUTE_IF("crash_purge_before_update_index", DBUG_SUICIDE(););
|
||||
|
||||
if ((error= sync_purge_index_file()))
|
||||
if (unlikely((error= sync_purge_index_file())))
|
||||
{
|
||||
sql_print_error("MYSQL_BIN_LOG::purge_logs failed to flush register file.");
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* We know how many files to delete. Update index file. */
|
||||
if ((error=update_log_index(&log_info, need_update_threads)))
|
||||
if (unlikely((error=update_log_index(&log_info, need_update_threads))))
|
||||
{
|
||||
sql_print_error("MYSQL_BIN_LOG::purge_logs failed to update the index file");
|
||||
goto err;
|
||||
|
@ -4662,8 +4665,9 @@ int MYSQL_BIN_LOG::sync_purge_index_file()
|
|||
int error= 0;
|
||||
DBUG_ENTER("MYSQL_BIN_LOG::sync_purge_index_file");
|
||||
|
||||
if ((error= flush_io_cache(&purge_index_file)) ||
|
||||
(error= my_sync(purge_index_file.file, MYF(MY_WME|MY_SYNC_FILESIZE))))
|
||||
if (unlikely((error= flush_io_cache(&purge_index_file))) ||
|
||||
unlikely((error= my_sync(purge_index_file.file,
|
||||
MYF(MY_WME | MY_SYNC_FILESIZE)))))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
DBUG_RETURN(error);
|
||||
|
@ -4674,8 +4678,9 @@ int MYSQL_BIN_LOG::register_purge_index_entry(const char *entry)
|
|||
int error= 0;
|
||||
DBUG_ENTER("MYSQL_BIN_LOG::register_purge_index_entry");
|
||||
|
||||
if ((error=my_b_write(&purge_index_file, (const uchar*)entry, strlen(entry))) ||
|
||||
(error=my_b_write(&purge_index_file, (const uchar*)"\n", 1)))
|
||||
if (unlikely((error=my_b_write(&purge_index_file, (const uchar*)entry,
|
||||
strlen(entry)))) ||
|
||||
unlikely((error=my_b_write(&purge_index_file, (const uchar*)"\n", 1))))
|
||||
DBUG_RETURN (error);
|
||||
|
||||
DBUG_RETURN(error);
|
||||
|
@ -4698,7 +4703,8 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *reclaimed_space,
|
|||
|
||||
DBUG_ASSERT(my_b_inited(&purge_index_file));
|
||||
|
||||
if ((error=reinit_io_cache(&purge_index_file, READ_CACHE, 0, 0, 0)))
|
||||
if (unlikely((error= reinit_io_cache(&purge_index_file, READ_CACHE, 0, 0,
|
||||
0))))
|
||||
{
|
||||
sql_print_error("MYSQL_BIN_LOG::purge_index_entry failed to reinit register file "
|
||||
"for read");
|
||||
|
@ -4727,7 +4733,8 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *reclaimed_space,
|
|||
/* Get rid of the trailing '\n' */
|
||||
log_info.log_file_name[length-1]= 0;
|
||||
|
||||
if (!mysql_file_stat(m_key_file_log, log_info.log_file_name, &s, MYF(0)))
|
||||
if (unlikely(!mysql_file_stat(m_key_file_log, log_info.log_file_name, &s,
|
||||
MYF(0))))
|
||||
{
|
||||
if (my_errno == ENOENT)
|
||||
{
|
||||
|
@ -4774,7 +4781,8 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *reclaimed_space,
|
|||
}
|
||||
else
|
||||
{
|
||||
if ((error= find_log_pos(&check_log_info, log_info.log_file_name, need_mutex)))
|
||||
if (unlikely((error= find_log_pos(&check_log_info,
|
||||
log_info.log_file_name, need_mutex))))
|
||||
{
|
||||
if (error != LOG_INFO_EOF)
|
||||
{
|
||||
|
@ -4887,7 +4895,7 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
|
|||
mysql_mutex_lock(&LOCK_index);
|
||||
to_log[0]= 0;
|
||||
|
||||
if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/)))
|
||||
if (unlikely((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/))))
|
||||
goto err;
|
||||
|
||||
while (strcmp(log_file_name, log_info.log_file_name) &&
|
||||
|
@ -5154,7 +5162,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
|
|||
We have to do this here and not in open as we want to store the
|
||||
new file name in the current binary log file.
|
||||
*/
|
||||
if ((error= generate_new_name(new_name, name, 0)))
|
||||
if (unlikely((error= generate_new_name(new_name, name, 0))))
|
||||
{
|
||||
#ifdef ENABLE_AND_FIX_HANG
|
||||
close_on_error= TRUE;
|
||||
|
@ -5198,7 +5206,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
|
|||
log rotation should give the waiting thread a signal to
|
||||
discover EOF and move on to the next log.
|
||||
*/
|
||||
if ((error= flush_io_cache(&log_file)))
|
||||
if (unlikely((error= flush_io_cache(&log_file))))
|
||||
{
|
||||
close_on_error= TRUE;
|
||||
goto end;
|
||||
|
@ -5244,7 +5252,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
|
|||
/* reopen index binlog file, BUG#34582 */
|
||||
file_to_open= index_file_name;
|
||||
error= open_index_file(index_file_name, 0, FALSE);
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
/* reopen the binary log file. */
|
||||
file_to_open= new_name_ptr;
|
||||
|
@ -5253,7 +5261,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
|
|||
}
|
||||
|
||||
/* handle reopening errors */
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
my_error(ER_CANT_OPEN_FILE, MYF(ME_FATALERROR), file_to_open, error);
|
||||
close_on_error= TRUE;
|
||||
|
@ -5269,7 +5277,7 @@ end:
|
|||
mysql_file_close(old_file, MYF(MY_WME));
|
||||
}
|
||||
|
||||
if (error && close_on_error /* rotate or reopen failed */)
|
||||
if (unlikely(error && close_on_error)) /* rotate or reopen failed */
|
||||
{
|
||||
/*
|
||||
Close whatever was left opened.
|
||||
|
@ -5395,7 +5403,7 @@ bool MYSQL_BIN_LOG::write_event_buffer(uchar* buf, uint len)
|
|||
error= new_file_without_locking();
|
||||
err:
|
||||
my_safe_afree(ebuf, len);
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
update_binlog_end_pos();
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
@ -5778,14 +5786,14 @@ int THD::binlog_write_table_map(TABLE *table, bool is_transactional,
|
|||
Annotate_rows_log_event anno(table->in_use, is_transactional, false);
|
||||
/* Annotate event should be written not more than once */
|
||||
*with_annotate= 0;
|
||||
if ((error= writer.write(&anno)))
|
||||
if (unlikely((error= writer.write(&anno))))
|
||||
{
|
||||
if (my_errno == EFBIG)
|
||||
cache_data->set_incident();
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
}
|
||||
if ((error= writer.write(&the_event)))
|
||||
if (unlikely((error= writer.write(&the_event))))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
binlog_table_maps++;
|
||||
|
@ -6433,7 +6441,7 @@ err:
|
|||
bool check_purge= false;
|
||||
DBUG_ASSERT(!is_relay_log);
|
||||
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
bool synced;
|
||||
|
||||
|
@ -6463,7 +6471,7 @@ err:
|
|||
it's list before dump-thread tries to send it
|
||||
*/
|
||||
update_binlog_end_pos(offset);
|
||||
if ((error= rotate(false, &check_purge)))
|
||||
if (unlikely((error= rotate(false, &check_purge))))
|
||||
check_purge= false;
|
||||
}
|
||||
}
|
||||
|
@ -6501,7 +6509,7 @@ err:
|
|||
checkpoint_and_purge(prev_binlog_id);
|
||||
}
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
set_write_error(thd, is_trans_cache);
|
||||
if (check_write_error(thd) && cache_data &&
|
||||
|
@ -6697,7 +6705,7 @@ int MYSQL_BIN_LOG::rotate(bool force_rotate, bool* check_purge)
|
|||
*/
|
||||
mark_xids_active(binlog_id, 1);
|
||||
|
||||
if ((error= new_file_without_locking()))
|
||||
if (unlikely((error= new_file_without_locking())))
|
||||
{
|
||||
/**
|
||||
Be conservative... There are possible lost events (eg,
|
||||
|
@ -6893,7 +6901,7 @@ int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate,
|
|||
if (err_gtid < 0)
|
||||
error= 1; // otherwise error is propagated the user
|
||||
}
|
||||
else if ((error= rotate(force_rotate, &check_purge)))
|
||||
else if (unlikely((error= rotate(force_rotate, &check_purge))))
|
||||
check_purge= false;
|
||||
/*
|
||||
NOTE: Run purge_logs wo/ holding LOCK_log because it does not need
|
||||
|
@ -7134,6 +7142,8 @@ int query_error_code(THD *thd, bool not_killed)
|
|||
if (not_killed || (killed_mask_hard(thd->killed) == KILL_BAD_DATA))
|
||||
{
|
||||
error= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0;
|
||||
if (!error)
|
||||
return error;
|
||||
|
||||
/* thd->get_get_stmt_da()->sql_errno() might be ER_SERVER_SHUTDOWN or
|
||||
ER_QUERY_INTERRUPTED, So here we need to make sure that error
|
||||
|
@ -7184,11 +7194,11 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
|
|||
if (likely(is_open()))
|
||||
{
|
||||
prev_binlog_id= current_binlog_id;
|
||||
if (!(error= write_incident_already_locked(thd)) &&
|
||||
!(error= flush_and_sync(0)))
|
||||
if (likely(!(error= write_incident_already_locked(thd))) &&
|
||||
likely(!(error= flush_and_sync(0))))
|
||||
{
|
||||
update_binlog_end_pos();
|
||||
if ((error= rotate(false, &check_purge)))
|
||||
if (unlikely((error= rotate(false, &check_purge))))
|
||||
check_purge= false;
|
||||
}
|
||||
|
||||
|
@ -7864,7 +7874,8 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
|
|||
*/
|
||||
DBUG_ASSERT(!cache_mngr->stmt_cache.empty() || !cache_mngr->trx_cache.empty());
|
||||
|
||||
if ((current->error= write_transaction_or_stmt(current, commit_id)))
|
||||
if (unlikely((current->error= write_transaction_or_stmt(current,
|
||||
commit_id))))
|
||||
current->commit_errno= errno;
|
||||
|
||||
strmake_buf(cache_mngr->last_commit_pos_file, log_file_name);
|
||||
|
@ -7892,7 +7903,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
|
|||
}
|
||||
|
||||
bool synced= 0;
|
||||
if (flush_and_sync(&synced))
|
||||
if (unlikely(flush_and_sync(&synced)))
|
||||
{
|
||||
for (current= queue; current != NULL; current= current->next)
|
||||
{
|
||||
|
@ -7916,12 +7927,13 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
|
|||
for (current= queue; current != NULL; current= current->next)
|
||||
{
|
||||
#ifdef HAVE_REPLICATION
|
||||
if (!current->error &&
|
||||
repl_semisync_master.
|
||||
report_binlog_update(current->thd,
|
||||
current->cache_mngr->last_commit_pos_file,
|
||||
current->cache_mngr->
|
||||
last_commit_pos_offset))
|
||||
if (likely(!current->error) &&
|
||||
unlikely(repl_semisync_master.
|
||||
report_binlog_update(current->thd,
|
||||
current->cache_mngr->
|
||||
last_commit_pos_file,
|
||||
current->cache_mngr->
|
||||
last_commit_pos_offset)))
|
||||
{
|
||||
current->error= ER_ERROR_ON_WRITE;
|
||||
current->commit_errno= -1;
|
||||
|
@ -7939,7 +7951,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
|
|||
*/
|
||||
update_binlog_end_pos(commit_offset);
|
||||
|
||||
if (any_error)
|
||||
if (unlikely(any_error))
|
||||
sql_print_error("Failed to run 'after_flush' hooks");
|
||||
}
|
||||
|
||||
|
@ -8004,7 +8016,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
|
|||
{
|
||||
last= current->next == NULL;
|
||||
#ifdef HAVE_REPLICATION
|
||||
if (!current->error)
|
||||
if (likely(!current->error))
|
||||
current->error=
|
||||
repl_semisync_master.wait_after_sync(current->cache_mngr->
|
||||
last_commit_pos_file,
|
||||
|
@ -8066,7 +8078,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
|
|||
|
||||
DEBUG_SYNC(leader->thd, "commit_loop_entry_commit_ordered");
|
||||
++num_commits;
|
||||
if (current->cache_mngr->using_xa && !current->error &&
|
||||
if (current->cache_mngr->using_xa && likely(!current->error) &&
|
||||
DBUG_EVALUATE_IF("skip_commit_ordered", 0, 1))
|
||||
run_commit_ordered(current->thd, current->all);
|
||||
current->thd->wakeup_subsequent_commits(current->error);
|
||||
|
@ -8158,12 +8170,12 @@ MYSQL_BIN_LOG::write_transaction_or_stmt(group_commit_entry *entry,
|
|||
}
|
||||
}
|
||||
|
||||
if (mngr->get_binlog_cache_log(FALSE)->error) // Error on read
|
||||
if (unlikely(mngr->get_binlog_cache_log(FALSE)->error))
|
||||
{
|
||||
entry->error_cache= &mngr->stmt_cache.cache_log;
|
||||
DBUG_RETURN(ER_ERROR_ON_WRITE);
|
||||
}
|
||||
if (mngr->get_binlog_cache_log(TRUE)->error) // Error on read
|
||||
if (unlikely(mngr->get_binlog_cache_log(TRUE)->error)) // Error on read
|
||||
{
|
||||
entry->error_cache= &mngr->trx_cache.cache_log;
|
||||
DBUG_RETURN(ER_ERROR_ON_WRITE);
|
||||
|
@ -8455,7 +8467,8 @@ void MYSQL_BIN_LOG::close(uint exiting)
|
|||
if ((exiting & LOG_CLOSE_INDEX) && my_b_inited(&index_file))
|
||||
{
|
||||
end_io_cache(&index_file);
|
||||
if (mysql_file_close(index_file.file, MYF(0)) < 0 && ! write_error)
|
||||
if (unlikely(mysql_file_close(index_file.file, MYF(0)) < 0) &&
|
||||
! write_error)
|
||||
{
|
||||
write_error= 1;
|
||||
sql_print_error(ER_THD_OR_DEFAULT(current_thd, ER_ERROR_ON_WRITE),
|
||||
|
@ -10218,7 +10231,7 @@ MYSQL_BIN_LOG::do_binlog_recovery(const char *opt_name, bool do_xa_recovery)
|
|||
char log_name[FN_REFLEN];
|
||||
int error;
|
||||
|
||||
if ((error= find_log_pos(&log_info, NullS, 1)))
|
||||
if (unlikely((error= find_log_pos(&log_info, NullS, 1))))
|
||||
{
|
||||
/*
|
||||
If there are no binlog files (LOG_INFO_EOF), then we still try to read
|
||||
|
@ -10276,7 +10289,7 @@ MYSQL_BIN_LOG::do_binlog_recovery(const char *opt_name, bool do_xa_recovery)
|
|||
else
|
||||
{
|
||||
error= read_state_from_file();
|
||||
if (error == 2)
|
||||
if (unlikely(error == 2))
|
||||
{
|
||||
/*
|
||||
The binlog exists, but the .state file is missing. This is normal if
|
||||
|
|
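The sql/log.cc hunks above also combine the two hints inside one compound condition: the "no error so far" test is marked likely() while each fallible call keeps its own wrapper. A small sketch in the same style, under the same macro assumptions as the earlier sketch; the helper names are invented stand-ins, not server functions.

#define likely(x)    __builtin_expect(((x) != 0), 1)
#define unlikely(x)  __builtin_expect(((x) != 0), 0)

static int write_cache(void) { return 0; }         /* stand-in for the cache write */
static int sync_cache(void)  { return 0; }         /* stand-in for the fsync step  */

static int flush_and_sync_sketch(void)
{
  int error;
  /* Hot path: both steps succeed; each failure test stays cold. */
  if (likely(!(error= write_cache())) &&
      likely(!(error= sync_cache())))
    return 0;
  return error;                                    /* cold path: pass the failure up */
}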
136  sql/log_event.cc
|
@ -395,7 +395,7 @@ static bool pretty_print_str(IO_CACHE* cache, const char* str, int len)
|
|||
error= my_b_write_byte(cache, c);
|
||||
break;
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
}
|
||||
return my_b_write_byte(cache, '\'');
|
||||
|
@ -1925,7 +1925,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file,
|
|||
res->register_temp_buf(event.release(), true);
|
||||
|
||||
err:
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_ASSERT(!res);
|
||||
#ifdef MYSQL_CLIENT
|
||||
|
@ -3121,7 +3121,7 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
|
|||
typestr, sizeof(typestr));
|
||||
error= copy_event_cache_to_string_and_reinit(&tmp_cache, &review_str);
|
||||
close_cached_file(&tmp_cache);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
return 0;
|
||||
|
||||
switch (td->type(i)) // Converting a string to HEX format
|
||||
|
@ -3741,15 +3741,15 @@ bool Log_event::print_base64(IO_CACHE* file,
|
|||
}
|
||||
|
||||
if (my_b_tell(file) == 0)
|
||||
if (my_b_write_string(file, "\nBINLOG '\n"))
|
||||
if (unlikely(my_b_write_string(file, "\nBINLOG '\n")))
|
||||
error= 1;
|
||||
if (!error && my_b_printf(file, "%s\n", tmp_str))
|
||||
if (likely(!error) && unlikely(my_b_printf(file, "%s\n", tmp_str)))
|
||||
error= 1;
|
||||
if (!more && !error)
|
||||
if (my_b_printf(file, "'%s\n", print_event_info->delimiter))
|
||||
if (!more && likely(!error))
|
||||
if (unlikely(my_b_printf(file, "'%s\n", print_event_info->delimiter)))
|
||||
error= 1;
|
||||
my_free(tmp_str);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@ -3856,7 +3856,7 @@ bool Log_event::print_base64(IO_CACHE* file,
|
|||
|
||||
error= ev->print_verbose(&tmp_cache, print_event_info);
|
||||
close_cached_file(&tmp_cache);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
delete ev;
|
||||
goto err;
|
||||
|
@ -3869,7 +3869,7 @@ bool Log_event::print_base64(IO_CACHE* file,
|
|||
ev->count_row_events(print_event_info);
|
||||
#endif
|
||||
delete ev;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
@ -5447,7 +5447,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
|
|||
thd->variables.pseudo_thread_id= thread_id; // for temp tables
|
||||
DBUG_PRINT("query",("%s", thd->query()));
|
||||
|
||||
if (!(expected_error= error_code) ||
|
||||
if (unlikely(!(expected_error= error_code)) ||
|
||||
ignored_error_code(expected_error) ||
|
||||
!unexpected_error_code(expected_error))
|
||||
{
|
||||
|
@ -5579,8 +5579,10 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
|
|||
rgi->gtid_pending= false;
|
||||
|
||||
gtid= rgi->current_gtid;
|
||||
if (rpl_global_gtid_slave_state->record_gtid(thd, >id, sub_id,
|
||||
true, false, &hton))
|
||||
if (unlikely(rpl_global_gtid_slave_state->record_gtid(thd, >id,
|
||||
sub_id,
|
||||
true, false,
|
||||
&hton)))
|
||||
{
|
||||
int errcode= thd->get_stmt_da()->sql_errno();
|
||||
if (!is_parallel_retry_error(rgi, errcode))
|
||||
|
@ -5607,7 +5609,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
|
|||
it is a concurrency issue or ignorable issue, effects
|
||||
of the statement should be rolled back.
|
||||
*/
|
||||
if (expected_error &&
|
||||
if (unlikely(expected_error) &&
|
||||
(ignored_error_code(expected_error) ||
|
||||
concurrency_error_code(expected_error)))
|
||||
{
|
||||
|
@ -5676,7 +5678,7 @@ START SLAVE; . Query: '%s'", expected_error, thd->query());
|
|||
}
|
||||
|
||||
/* If the query was not ignored, it is printed to the general log */
|
||||
if (!thd->is_error() ||
|
||||
if (likely(!thd->is_error()) ||
|
||||
thd->get_stmt_da()->sql_errno() != ER_SLAVE_IGNORED_TABLE)
|
||||
general_log_write(thd, COM_QUERY, thd->query(), thd->query_length());
|
||||
else
|
||||
|
@ -5715,7 +5717,7 @@ compare_errors:
|
|||
DBUG_PRINT("info",("expected_error: %d sql_errno: %d",
|
||||
expected_error, actual_error));
|
||||
|
||||
if ((expected_error &&
|
||||
if ((unlikely(expected_error) &&
|
||||
!test_if_equal_repl_errors(expected_error, actual_error) &&
|
||||
!concurrency_error_code(expected_error)) &&
|
||||
!ignored_error_code(actual_error) &&
|
||||
|
@ -5750,7 +5752,7 @@ compare_errors:
|
|||
/*
|
||||
Other cases: mostly we expected no error and get one.
|
||||
*/
|
||||
else if (thd->is_slave_error || thd->is_fatal_error)
|
||||
else if (unlikely(thd->is_slave_error || thd->is_fatal_error))
|
||||
{
|
||||
if (!is_parallel_retry_error(rgi, actual_error))
|
||||
rli->report(ERROR_LEVEL, actual_error, rgi->gtid_info(),
|
||||
|
@ -5800,7 +5802,7 @@ compare_errors:
|
|||
}
|
||||
|
||||
end:
|
||||
if (sub_id && !thd->is_slave_error)
|
||||
if (unlikely(sub_id && !thd->is_slave_error))
|
||||
rpl_global_gtid_slave_state->update_state_hash(sub_id, >id, hton, rgi);
|
||||
|
||||
/*
|
||||
|
@ -7476,7 +7478,7 @@ error:
|
|||
DBUG_EXECUTE_IF("LOAD_DATA_INFILE_has_fatal_error",
|
||||
thd->is_slave_error= 0; thd->is_fatal_error= 1;);
|
||||
|
||||
if (thd->is_slave_error)
|
||||
if (unlikely(thd->is_slave_error))
|
||||
{
|
||||
/* this err/sql_errno code is copy-paste from net_send_error() */
|
||||
const char *err;
|
||||
|
@ -7499,7 +7501,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
|
|||
}
|
||||
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
|
||||
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
{
|
||||
char buf[256];
|
||||
my_snprintf(buf, sizeof(buf),
|
||||
|
@ -8915,7 +8917,7 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
gtid= rgi->current_gtid;
|
||||
err= rpl_global_gtid_slave_state->record_gtid(thd, >id, sub_id, true,
|
||||
false, &hton);
|
||||
if (err)
|
||||
if (unlikely(err))
|
||||
{
|
||||
int ec= thd->get_stmt_da()->sql_errno();
|
||||
/*
|
||||
|
@ -8946,7 +8948,7 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
res= trans_commit(thd); /* Automatically rolls back on error. */
|
||||
thd->mdl_context.release_transactional_locks();
|
||||
|
||||
if (!res && sub_id)
|
||||
if (likely(!res) && sub_id)
|
||||
rpl_global_gtid_slave_state->update_state_hash(sub_id, >id, hton, rgi);
|
||||
|
||||
/*
|
||||
|
@ -9200,7 +9202,7 @@ User_var_log_event(const char* buf, uint event_len,
|
|||
}
|
||||
|
||||
err:
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
name= 0;
|
||||
}
|
||||
|
||||
|
@ -9380,7 +9382,7 @@ bool User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
|
|||
cs->csname, hex_str, cs->name,
|
||||
print_event_info->delimiter);
|
||||
my_free(hex_str);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
break;
|
||||
}
|
||||
|
@ -9918,9 +9920,9 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
error=0; // Everything is ok
|
||||
|
||||
err:
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
end_io_cache(&file);
|
||||
if (fd >= 0)
|
||||
if (likely(fd >= 0))
|
||||
mysql_file_close(fd, MYF(0));
|
||||
return error != 0;
|
||||
}
|
||||
|
@ -10631,7 +10633,7 @@ Execute_load_query_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
If there was an error the slave is going to stop, leave the
|
||||
file so that we can re-execute this event at START SLAVE.
|
||||
*/
|
||||
if (!error)
|
||||
if (unlikely(!error))
|
||||
mysql_file_delete(key_file_log_event_data, fname, MYF(MY_WME));
|
||||
|
||||
my_free(buf);
|
||||
|
@ -11271,7 +11273,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
lex->query_tables_last= &tables->next_global;
|
||||
}
|
||||
}
|
||||
if (open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0))
|
||||
if (unlikely(open_and_lock_tables(thd, rgi->tables_to_lock, FALSE, 0)))
|
||||
{
|
||||
uint actual_error= thd->get_stmt_da()->sql_errno();
|
||||
#ifdef WITH_WSREP
|
||||
|
@ -11524,13 +11526,13 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
|
||||
error= do_exec_row(rgi);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_PRINT("info", ("error: %s", HA_ERR(error)));
|
||||
DBUG_ASSERT(error != HA_ERR_RECORD_DELETED);
|
||||
|
||||
table->in_use = old_thd;
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
int actual_error= convert_handler_error(error, thd, table);
|
||||
bool idempotent_error= (idempotent_error_code(error) &&
|
||||
|
@ -11561,12 +11563,12 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
DBUG_PRINT("info", ("curr_row: %p; curr_row_end: %p; rows_end:%p",
|
||||
m_curr_row, m_curr_row_end, m_rows_end));
|
||||
|
||||
if (!m_curr_row_end && !error)
|
||||
if (!m_curr_row_end && likely(!error))
|
||||
error= unpack_current_row(rgi);
|
||||
|
||||
m_curr_row= m_curr_row_end;
|
||||
|
||||
if (error == 0 && !transactional_table)
|
||||
if (likely(error == 0) && !transactional_table)
|
||||
thd->transaction.all.modified_non_trans_table=
|
||||
thd->transaction.stmt.modified_non_trans_table= TRUE;
|
||||
} // row processing loop
|
||||
|
@ -11588,7 +11590,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
|
||||
}
|
||||
|
||||
if ((error= do_after_row_operations(rli, error)) &&
|
||||
if (unlikely(error= do_after_row_operations(rli, error)) &&
|
||||
ignored_error_code(convert_handler_error(error, thd, table)))
|
||||
{
|
||||
|
||||
|
@ -11602,7 +11604,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
} // if (table)
|
||||
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
slave_rows_error_report(ERROR_LEVEL, error, rgi, thd, table,
|
||||
get_type_str(),
|
||||
|
@ -11631,7 +11633,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
}
|
||||
#endif /* WITH_WSREP && HAVE_QUERY_CACHE */
|
||||
|
||||
if (get_flags(STMT_END_F) && (error= rows_event_stmt_cleanup(rgi, thd)))
|
||||
if (unlikely(get_flags(STMT_END_F) &&
|
||||
(error= rows_event_stmt_cleanup(rgi, thd))))
|
||||
slave_rows_error_report(ERROR_LEVEL,
|
||||
thd->is_error() ? 0 : error,
|
||||
rgi, thd, table,
|
||||
|
@ -12988,7 +12991,7 @@ Write_rows_log_event::do_after_row_operations(const Slave_reporting_capability *
|
|||
ultimately. Still todo: fix
|
||||
*/
|
||||
}
|
||||
if ((local_error= m_table->file->ha_end_bulk_insert()))
|
||||
if (unlikely((local_error= m_table->file->ha_end_bulk_insert())))
|
||||
{
|
||||
m_table->file->print_error(local_error, MYF(0));
|
||||
}
|
||||
|
@ -13106,7 +13109,7 @@ Rows_log_event::write_row(rpl_group_info *rgi,
|
|||
prepare_record(table, m_width, true);
|
||||
|
||||
/* unpack row into table->record[0] */
|
||||
if ((error= unpack_current_row(rgi)))
|
||||
if (unlikely((error= unpack_current_row(rgi))))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
DBUG_RETURN(error);
|
||||
|
@ -13146,9 +13149,9 @@ Rows_log_event::write_row(rpl_group_info *rgi,
|
|||
DBUG_PRINT_BITSET("debug", "read_set: %s", table->read_set);
|
||||
|
||||
if (invoke_triggers &&
|
||||
process_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE, TRUE))
|
||||
unlikely(process_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE, TRUE)))
|
||||
{
|
||||
DBUG_RETURN(HA_ERR_GENERIC); // in case if error is not set yet
|
||||
DBUG_RETURN(HA_ERR_GENERIC); // in case if error is not set yet
|
||||
}
|
||||
|
||||
// Handle INSERT.
|
||||
|
@ -13172,7 +13175,7 @@ Rows_log_event::write_row(rpl_group_info *rgi,
|
|||
|
||||
if (table->s->sequence)
|
||||
error= update_sequence();
|
||||
else while ((error= table->file->ha_write_row(table->record[0])))
|
||||
else while (unlikely(error= table->file->ha_write_row(table->record[0])))
|
||||
{
|
||||
if (error == HA_ERR_LOCK_DEADLOCK ||
|
||||
error == HA_ERR_LOCK_WAIT_TIMEOUT ||
|
||||
|
@ -13204,7 +13207,7 @@ Rows_log_event::write_row(rpl_group_info *rgi,
|
|||
{
|
||||
DBUG_PRINT("info",("Locating offending record using rnd_pos()"));
|
||||
error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info",("rnd_pos() returns error %d",error));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -13239,7 +13242,7 @@ Rows_log_event::write_row(rpl_group_info *rgi,
|
|||
(const uchar*)key.get(),
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info",("index_read_idx() returns %s", HA_ERR(error)));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -13314,18 +13317,20 @@ Rows_log_event::write_row(rpl_group_info *rgi,
|
|||
{
|
||||
DBUG_PRINT("info",("Deleting offending row and trying to write new one again"));
|
||||
if (invoke_triggers &&
|
||||
process_triggers(TRG_EVENT_DELETE, TRG_ACTION_BEFORE, TRUE))
|
||||
unlikely(process_triggers(TRG_EVENT_DELETE, TRG_ACTION_BEFORE,
|
||||
TRUE)))
|
||||
error= HA_ERR_GENERIC; // in case if error is not set yet
|
||||
else
|
||||
{
|
||||
if ((error= table->file->ha_delete_row(table->record[1])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
|
||||
{
|
||||
DBUG_PRINT("info",("ha_delete_row() returns error %d",error));
|
||||
table->file->print_error(error, MYF(0));
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
if (invoke_triggers &&
|
||||
process_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER, TRUE))
|
||||
unlikely(process_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER,
|
||||
TRUE)))
|
||||
DBUG_RETURN(HA_ERR_GENERIC); // in case if error is not set yet
|
||||
}
|
||||
/* Will retry ha_write_row() with the offending row removed. */
|
||||
|
@ -13333,7 +13338,7 @@ Rows_log_event::write_row(rpl_group_info *rgi,
|
|||
}
|
||||
|
||||
if (invoke_triggers &&
|
||||
process_triggers(TRG_EVENT_INSERT, TRG_ACTION_AFTER, TRUE))
|
||||
unlikely(process_triggers(TRG_EVENT_INSERT, TRG_ACTION_AFTER, TRUE)))
|
||||
error= HA_ERR_GENERIC; // in case if error is not set yet
|
||||
|
||||
DBUG_RETURN(error);
|
||||
|
@ -13387,7 +13392,7 @@ Write_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
error= write_row(rgi, slave_exec_mode == SLAVE_EXEC_MODE_IDEMPOTENT);
|
||||
thd_proc_info(thd, tmp);
|
||||
|
||||
if (error && !thd->is_error())
|
||||
if (unlikely(error) && unlikely(!thd->is_error()))
|
||||
{
|
||||
DBUG_ASSERT(0);
|
||||
my_error(ER_UNKNOWN_ERROR, MYF(0));
|
||||
|
@ -13738,7 +13743,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
|||
DBUG_RETURN(error);
|
||||
|
||||
error= table->file->ha_rnd_pos_by_record(table->record[0]);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info",("rnd_pos returns error %d",error));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -13802,9 +13807,10 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
|||
table->record[0][table->s->null_bytes - 1]|=
|
||||
256U - (1U << table->s->last_null_bit_pos);
|
||||
|
||||
if ((error= table->file->ha_index_read_map(table->record[0], m_key,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT)))
|
||||
if (unlikely((error= table->file->ha_index_read_map(table->record[0],
|
||||
m_key,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT))))
|
||||
{
|
||||
DBUG_PRINT("info",("no record matching the key found in the table"));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -13885,7 +13891,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
|||
while ((error= table->file->ha_index_next(table->record[0])))
|
||||
{
|
||||
/* We just skip records that has already been deleted */
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
if (unlikely(error == HA_ERR_RECORD_DELETED))
|
||||
continue;
|
||||
DBUG_PRINT("info",("no record matching the given row found"));
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
@ -13901,7 +13907,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
|||
DBUG_EXECUTE_IF("slave_crash_if_table_scan", abort(););
|
||||
|
||||
/* We don't have a key: search the table using rnd_next() */
|
||||
if ((error= table->file->ha_rnd_init_with_error(1)))
|
||||
if (unlikely((error= table->file->ha_rnd_init_with_error(1))))
|
||||
{
|
||||
DBUG_PRINT("info",("error initializing table scan"
|
||||
" (ha_rnd_init returns %d)",error));
|
||||
|
@ -13916,7 +13922,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
|
|||
restart_rnd_next:
|
||||
error= table->file->ha_rnd_next(table->record[0]);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_PRINT("info", ("error: %s", HA_ERR(error)));
|
||||
switch (error) {
|
||||
|
||||
|
@ -14066,7 +14072,7 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
#endif /* WSREP_PROC_INFO */
|
||||
|
||||
thd_proc_info(thd, message);
|
||||
if (!(error= find_row(rgi)))
|
||||
if (likely(!(error= find_row(rgi))))
|
||||
{
|
||||
/*
|
||||
Delete the record found, located in record[0]
|
||||
|
@ -14081,9 +14087,9 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
thd_proc_info(thd, message);
|
||||
|
||||
if (invoke_triggers &&
|
||||
process_triggers(TRG_EVENT_DELETE, TRG_ACTION_BEFORE, FALSE))
|
||||
unlikely(process_triggers(TRG_EVENT_DELETE, TRG_ACTION_BEFORE, FALSE)))
|
||||
error= HA_ERR_GENERIC; // in case if error is not set yet
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
m_table->mark_columns_per_binlog_row_image();
|
||||
if (m_vers_from_plain && m_table->versioned(VERS_TIMESTAMP))
|
||||
|
@ -14101,8 +14107,8 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
}
|
||||
m_table->default_column_bitmaps();
|
||||
}
|
||||
if (invoke_triggers && !error &&
|
||||
process_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER, FALSE))
|
||||
if (invoke_triggers && likely(!error) &&
|
||||
unlikely(process_triggers(TRG_EVENT_DELETE, TRG_ACTION_AFTER, FALSE)))
|
||||
error= HA_ERR_GENERIC; // in case if error is not set yet
|
||||
m_table->file->ha_index_or_rnd_end();
|
||||
}
|
||||
|
@ -14287,7 +14293,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
|
||||
thd_proc_info(thd, message);
|
||||
int error= find_row(rgi);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
/*
|
||||
We need to read the second image in the event of error to be
|
||||
|
@ -14323,7 +14329,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
|
||||
/* this also updates m_curr_row_end */
|
||||
thd_proc_info(thd, message);
|
||||
if ((error= unpack_current_row(rgi, &m_cols_ai)))
|
||||
if (unlikely((error= unpack_current_row(rgi, &m_cols_ai))))
|
||||
goto err;
|
||||
|
||||
/*
|
||||
|
@ -14350,7 +14356,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
|
||||
thd_proc_info(thd, message);
|
||||
if (invoke_triggers &&
|
||||
process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_BEFORE, TRUE))
|
||||
unlikely(process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_BEFORE, TRUE)))
|
||||
{
|
||||
error= HA_ERR_GENERIC; // in case if error is not set yet
|
||||
goto err;
|
||||
|
@ -14364,7 +14370,7 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
if (m_vers_from_plain && m_table->versioned(VERS_TIMESTAMP))
|
||||
m_table->vers_update_fields();
|
||||
error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]);
|
||||
if (error == HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
|
||||
error= 0;
|
||||
if (m_vers_from_plain && m_table->versioned(VERS_TIMESTAMP))
|
||||
{
|
||||
|
@ -14374,8 +14380,8 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
|
|||
}
|
||||
m_table->default_column_bitmaps();
|
||||
|
||||
if (invoke_triggers && !error &&
|
||||
process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE))
|
||||
if (invoke_triggers && likely(!error) &&
|
||||
unlikely(process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE)))
|
||||
error= HA_ERR_GENERIC; // in case if error is not set yet
|
||||
|
||||
thd_proc_info(thd, tmp);
|
||||
|
|
|
@ -5034,7 +5034,8 @@ public:
|
|||
DBUG_ENTER("Incident_log_event::Incident_log_event");
|
||||
DBUG_PRINT("enter", ("m_incident: %d", m_incident));
|
||||
m_message.length= 0;
|
||||
if (!(m_message.str= (char*) my_malloc(msg->length+1, MYF(MY_WME))))
|
||||
if (unlikely(!(m_message.str= (char*) my_malloc(msg->length+1,
|
||||
MYF(MY_WME)))))
|
||||
{
|
||||
/* Mark this event invalid */
|
||||
m_incident= INCIDENT_NONE;
|
||||
|
|
|
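Many of the wrapped conditions in the log_event hunks have the form if (unlikely((error= call()))): the inner parentheses make the whole assignment the macro argument, so the result is stored and then tested as one expression. A standalone illustration with an invented helper, again assuming the GCC-style macro from the earlier sketch.

#include <assert.h>

#define unlikely(x)  __builtin_expect(((x) != 0), 0)

static int failing_call(void) { return 7; }        /* pretend error code */

int main(void)
{
  int error;
  if (unlikely((error= failing_call())))           /* assigns first, then tests the value */
    assert(error == 7);
  return 0;
}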
@ -99,7 +99,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
|
|||
*/
|
||||
ev_thd->lex->set_stmt_row_injection();
|
||||
|
||||
if (open_and_lock_tables(ev_thd, rgi->tables_to_lock, FALSE, 0))
|
||||
if (unlikely(open_and_lock_tables(ev_thd, rgi->tables_to_lock, FALSE, 0)))
|
||||
{
|
||||
uint actual_error= ev_thd->get_stmt_da()->sql_errno();
|
||||
if (ev_thd->is_slave_error || ev_thd->is_fatal_error)
|
||||
|
@ -228,7 +228,8 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
|
|||
while (error == 0 && row_start < ev->m_rows_end)
|
||||
{
|
||||
uchar const *row_end= NULL;
|
||||
if ((error= do_prepare_row(ev_thd, rgi, table, row_start, &row_end)))
|
||||
if (unlikely((error= do_prepare_row(ev_thd, rgi, table, row_start,
|
||||
&row_end))))
|
||||
break; // We should perform the after-row operation even in
|
||||
// the case of error
|
||||
|
||||
|
@ -267,7 +268,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
|
|||
error= do_after_row_operations(table, error);
|
||||
}
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{ /* error has occurred during the transaction */
|
||||
rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
|
||||
"Error in %s event: error during transaction execution "
|
||||
|
@ -478,14 +479,14 @@ replace_record(THD *thd, TABLE *table,
|
|||
DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set);
|
||||
#endif
|
||||
|
||||
while ((error= table->file->ha_write_row(table->record[0])))
|
||||
while (unlikely(error= table->file->ha_write_row(table->record[0])))
|
||||
{
|
||||
if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
{
|
||||
table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
if ((keynum= table->file->get_dup_key(error)) < 0)
|
||||
if (unlikely((keynum= table->file->get_dup_key(error)) < 0))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
/*
|
||||
|
@ -509,7 +510,7 @@ replace_record(THD *thd, TABLE *table,
|
|||
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
|
||||
{
|
||||
error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info",("rnd_pos() returns error %d",error));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -520,7 +521,7 @@ replace_record(THD *thd, TABLE *table,
|
|||
}
|
||||
else
|
||||
{
|
||||
if (table->file->extra(HA_EXTRA_FLUSH_CACHE))
|
||||
if (unlikely(table->file->extra(HA_EXTRA_FLUSH_CACHE)))
|
||||
{
|
||||
DBUG_RETURN(my_errno);
|
||||
}
|
||||
|
@ -528,7 +529,7 @@ replace_record(THD *thd, TABLE *table,
|
|||
if (key.get() == NULL)
|
||||
{
|
||||
key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
|
||||
if (key.get() == NULL)
|
||||
if (unlikely(key.get() == NULL))
|
||||
DBUG_RETURN(ENOMEM);
|
||||
}
|
||||
|
||||
|
@ -538,7 +539,7 @@ replace_record(THD *thd, TABLE *table,
|
|||
(const uchar*)key.get(),
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info", ("index_read_idx() returns error %d", error));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -578,7 +579,7 @@ replace_record(THD *thd, TABLE *table,
|
|||
{
|
||||
error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0]);
|
||||
if (error && error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely(error) && error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
table->file->print_error(error, MYF(0));
|
||||
else
|
||||
error= 0;
|
||||
|
@ -586,7 +587,7 @@ replace_record(THD *thd, TABLE *table,
|
|||
}
|
||||
else
|
||||
{
|
||||
if ((error= table->file->ha_delete_row(table->record[1])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
DBUG_RETURN(error);
|
||||
|
@ -672,7 +673,8 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
|
|||
{
|
||||
int error;
|
||||
/* We have a key: search the table using the index */
|
||||
if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE)))
|
||||
if (!table->file->inited &&
|
||||
unlikely(error= table->file->ha_index_init(0, FALSE)))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
DBUG_RETURN(error);
|
||||
|
@ -696,9 +698,9 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
|
|||
my_ptrdiff_t const pos=
|
||||
table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
|
||||
table->record[1][pos]= 0xFF;
|
||||
if ((error= table->file->ha_index_read_map(table->record[1], key,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT)))
|
||||
if (unlikely((error= table->file->ha_index_read_map(table->record[1], key,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT))))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
table->file->ha_index_end();
|
||||
|
@ -740,7 +742,7 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
|
|||
while ((error= table->file->ha_index_next(table->record[1])))
|
||||
{
|
||||
/* We just skip records that has already been deleted */
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
if (unlikely(error == HA_ERR_RECORD_DELETED))
|
||||
continue;
|
||||
table->file->print_error(error, MYF(0));
|
||||
table->file->ha_index_end();
|
||||
|
@ -759,7 +761,7 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
|
|||
int error;
|
||||
|
||||
/* We don't have a key: search the table using rnd_next() */
|
||||
if ((error= table->file->ha_rnd_init_with_error(1)))
|
||||
if (unlikely((error= table->file->ha_rnd_init_with_error(1))))
|
||||
return error;
|
||||
|
||||
/* Continue until we find the right record or have made a full loop */
|
||||
|
@ -786,7 +788,7 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
|
|||
if (++restart_count < 2)
|
||||
{
|
||||
int error2;
|
||||
if ((error2= table->file->ha_rnd_init_with_error(1)))
|
||||
if (unlikely((error2= table->file->ha_rnd_init_with_error(1))))
|
||||
DBUG_RETURN(error2);
|
||||
}
|
||||
break;
|
||||
|
@ -854,7 +856,7 @@ int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
|
|||
fires bug#27077
|
||||
todo: explain or fix
|
||||
*/
|
||||
if ((local_error= table->file->ha_end_bulk_insert()))
|
||||
if (unlikely((local_error= table->file->ha_end_bulk_insert())))
|
||||
{
|
||||
table->file->print_error(local_error, MYF(0));
|
||||
}
|
||||
|
@ -986,7 +988,7 @@ int Delete_rows_log_event_old::do_exec_row(TABLE *table)
|
|||
int error;
|
||||
DBUG_ASSERT(table != NULL);
|
||||
|
||||
if (!(error= ::find_and_fetch_row(table, m_key)))
|
||||
if (likely(!(error= ::find_and_fetch_row(table, m_key))))
|
||||
{
|
||||
/*
|
||||
Now we should have the right row to delete. We are using
|
||||
|
@ -1095,7 +1097,7 @@ int Update_rows_log_event_old::do_exec_row(TABLE *table)
|
|||
DBUG_ASSERT(table != NULL);
|
||||
|
||||
int error= ::find_and_fetch_row(table, m_key);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
return error;
|
||||
|
||||
/*
|
||||
|
@ -1121,7 +1123,7 @@ int Update_rows_log_event_old::do_exec_row(TABLE *table)
|
|||
database into the after image delivered from the master.
|
||||
*/
|
||||
error= table->file->ha_update_row(table->record[1], table->record[0]);
|
||||
if (error == HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
|
||||
error= 0;
|
||||
|
||||
return error;
|
||||
|
@ -1417,8 +1419,8 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
*/
|
||||
lex_start(thd);
|
||||
|
||||
if ((error= lock_tables(thd, rgi->tables_to_lock,
|
||||
rgi->tables_to_lock_count, 0)))
|
||||
if (unlikely((error= lock_tables(thd, rgi->tables_to_lock,
|
||||
rgi->tables_to_lock_count, 0))))
|
||||
{
|
||||
if (thd->is_slave_error || thd->is_fatal_error)
|
||||
{
|
||||
|
@ -1610,7 +1612,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
DBUG_PRINT("info", ("curr_row: %p; curr_row_end:%p; rows_end: %p",
|
||||
m_curr_row, m_curr_row_end, m_rows_end));
|
||||
|
||||
if (!m_curr_row_end && !error)
|
||||
if (!m_curr_row_end && likely(!error))
|
||||
unpack_current_row(rgi);
|
||||
|
||||
// at this moment m_curr_row_end should be set
|
||||
|
@ -1627,7 +1629,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
error= do_after_row_operations(rli, error);
|
||||
} // if (table)
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{ /* error has occurred during the transaction */
|
||||
rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
|
||||
"Error in %s event: error during transaction execution "
|
||||
|
@ -1711,7 +1713,9 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
|
|||
already. So there should be no need to rollback the transaction.
|
||||
*/
|
||||
DBUG_ASSERT(! thd->transaction_rollback_request);
|
||||
if ((error= (binlog_error ? trans_rollback_stmt(thd) : trans_commit_stmt(thd))))
|
||||
if (unlikely((error= (binlog_error ?
|
||||
trans_rollback_stmt(thd) :
|
||||
trans_commit_stmt(thd)))))
|
||||
rli->report(ERROR_LEVEL, error, NULL,
|
||||
"Error in %s event: commit of row events failed, "
|
||||
"table `%s`.`%s`",
|
||||
|
@ -1932,8 +1936,9 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite)
|
|||
|
||||
/* fill table->record[0] with default values */
|
||||
|
||||
if ((error= prepare_record(table, m_width,
|
||||
TRUE /* check if columns have def. values */)))
|
||||
if (unlikely((error=
|
||||
prepare_record(table, m_width,
|
||||
TRUE /* check if columns have def. values */))))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
/* unpack row into table->record[0] */
|
||||
|
@ -1954,14 +1959,14 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite)
|
|||
TODO: Add safety measures against infinite looping.
|
||||
*/
|
||||
|
||||
while ((error= table->file->ha_write_row(table->record[0])))
|
||||
while (unlikely(error= table->file->ha_write_row(table->record[0])))
|
||||
{
|
||||
if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT)
|
||||
{
|
||||
table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
if ((keynum= table->file->get_dup_key(error)) < 0)
|
||||
if (unlikely((keynum= table->file->get_dup_key(error)) < 0))
|
||||
{
|
||||
DBUG_PRINT("info",("Can't locate duplicate key (get_dup_key returns %d)",keynum));
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
@ -1987,7 +1992,7 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite)
|
|||
{
|
||||
DBUG_PRINT("info",("Locating offending record using rnd_pos()"));
|
||||
error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info",("rnd_pos() returns error %d",error));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -2009,7 +2014,7 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite)
|
|||
if (key.get() == NULL)
|
||||
{
|
||||
key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
|
||||
if (key.get() == NULL)
|
||||
if (unlikely(key.get() == NULL))
|
||||
{
|
||||
DBUG_PRINT("info",("Can't allocate key buffer"));
|
||||
DBUG_RETURN(ENOMEM);
|
||||
|
@ -2022,7 +2027,7 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite)
|
|||
(const uchar*)key.get(),
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info",("index_read_idx() returns error %d", error));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -2095,7 +2100,7 @@ Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite)
|
|||
else
|
||||
{
|
||||
DBUG_PRINT("info",("Deleting offending row and trying to write new one again"));
|
||||
if ((error= table->file->ha_delete_row(table->record[1])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
|
||||
{
|
||||
DBUG_PRINT("info",("ha_delete_row() returns error %d",error));
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
@ -2183,7 +2188,7 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi)
|
|||
*/
|
||||
DBUG_PRINT("info",("locating record using primary key (position)"));
|
||||
int error= table->file->ha_rnd_pos_by_record(table->record[0]);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info",("rnd_pos returns error %d",error));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -2212,7 +2217,8 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi)
|
|||
DBUG_PRINT("info",("locating record using primary key (index_read)"));
|
||||
|
||||
/* We have a key: search the table using the index */
|
||||
if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE)))
|
||||
if (!table->file->inited &&
|
||||
unlikely(error= table->file->ha_index_init(0, FALSE)))
|
||||
{
|
||||
DBUG_PRINT("info",("ha_index_init returns error %d",error));
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
@ -2242,9 +2248,10 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi)
|
|||
table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
|
||||
table->record[0][pos]= 0xFF;
|
||||
|
||||
if ((error= table->file->ha_index_read_map(table->record[0], m_key,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT)))
|
||||
if (unlikely((error= table->file->ha_index_read_map(table->record[0],
|
||||
m_key,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT))))
|
||||
{
|
||||
DBUG_PRINT("info",("no record matching the key found in the table"));
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -2316,7 +2323,7 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi)
|
|||
|
||||
while (record_compare(table))
|
||||
{
|
||||
while ((error= table->file->ha_index_next(table->record[0])))
|
||||
while (unlikely(error= table->file->ha_index_next(table->record[0])))
|
||||
{
|
||||
/* We just skip records that has already been deleted */
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
|
@ -2335,7 +2342,7 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi)
|
|||
int restart_count= 0; // Number of times scanning has restarted from top
|
||||
|
||||
/* We don't have a key: search the table using rnd_next() */
|
||||
if ((error= table->file->ha_rnd_init_with_error(1)))
|
||||
if (unlikely((error= table->file->ha_rnd_init_with_error(1))))
|
||||
{
|
||||
DBUG_PRINT("info",("error initializing table scan"
|
||||
" (ha_rnd_init returns %d)",error));
|
||||
|
@ -2361,7 +2368,7 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi)
|
|||
{
|
||||
int error2;
|
||||
table->file->ha_rnd_end();
|
||||
if ((error2= table->file->ha_rnd_init_with_error(1)))
|
||||
if (unlikely((error2= table->file->ha_rnd_init_with_error(1))))
|
||||
DBUG_RETURN(error2);
|
||||
goto restart_rnd_next;
|
||||
}
|
||||
|
@ -2480,7 +2487,7 @@ Write_rows_log_event_old::do_after_row_operations(const Slave_reporting_capabili
|
|||
fires bug#27077
|
||||
todo: explain or fix
|
||||
*/
|
||||
if ((local_error= m_table->file->ha_end_bulk_insert()))
|
||||
if (unlikely((local_error= m_table->file->ha_end_bulk_insert())))
|
||||
{
|
||||
m_table->file->print_error(local_error, MYF(0));
|
||||
}
|
||||
|
@ -2494,7 +2501,7 @@ Write_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
|
|||
DBUG_ASSERT(m_table != NULL);
|
||||
int error= write_row(rgi, TRUE /* overwrite */);
|
||||
|
||||
if (error && !thd->net.last_errno)
|
||||
if (unlikely(error) && !thd->net.last_errno)
|
||||
thd->net.last_errno= error;
|
||||
|
||||
return error;
|
||||
|
@ -2597,7 +2604,7 @@ int Delete_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
|
|||
int error;
|
||||
DBUG_ASSERT(m_table != NULL);
|
||||
|
||||
if (!(error= find_row(rgi)))
|
||||
if (likely(!(error= find_row(rgi))) )
|
||||
{
|
||||
/*
|
||||
Delete the record found, located in record[0]
|
||||
|
@ -2697,7 +2704,7 @@ Update_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
|
|||
DBUG_ASSERT(m_table != NULL);
|
||||
|
||||
int error= find_row(rgi);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
/*
|
||||
We need to read the second image in the event of error to be
|
||||
|
@ -2741,7 +2748,7 @@ Update_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
|
|||
error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]);
|
||||
m_table->file->ha_index_or_rnd_end();
|
||||
|
||||
if (error == HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
|
||||
error= 0;
|
||||
|
||||
return error;
|
||||
|
|
|
@@ -58,12 +58,12 @@ int _my_b_net_read(IO_CACHE *info, uchar *Buffer, size_t)
  if (!info->end_of_file)
    DBUG_RETURN(1); /* because my_b_get (no _) takes 1 byte at a time */
  read_length= my_net_read_packet(net, 0);
  if (read_length == packet_error)
  if (unlikely(read_length == packet_error))
  {
    info->error= -1;
    DBUG_RETURN(1);
  }
  if (read_length == 0)
  if (unlikely(read_length == 0))
  {
    info->end_of_file= 0; /* End of file from client */
    DBUG_RETURN(1);
@ -1957,10 +1957,11 @@ void kill_mysql(THD *thd)
|
|||
pthread_t tmp;
|
||||
int error;
|
||||
abort_loop=1;
|
||||
if ((error= mysql_thread_create(0, /* Not instrumented */
|
||||
&tmp, &connection_attrib,
|
||||
kill_server_thread, (void*) 0)))
|
||||
sql_print_error("Can't create thread to kill server (errno= %d).", error);
|
||||
if (unlikely((error= mysql_thread_create(0, /* Not instrumented */
|
||||
&tmp, &connection_attrib,
|
||||
kill_server_thread, (void*) 0))))
|
||||
sql_print_error("Can't create thread to kill server (errno= %d).",
|
||||
error);
|
||||
}
|
||||
#endif
|
||||
DBUG_VOID_RETURN;
|
||||
|
@ -2569,7 +2570,7 @@ static MYSQL_SOCKET activate_tcp_port(uint port)
|
|||
|
||||
my_snprintf(port_buf, NI_MAXSERV, "%d", port);
|
||||
error= getaddrinfo(real_bind_addr_str, port_buf, &hints, &ai);
|
||||
if (error != 0)
|
||||
if (unlikely(error != 0))
|
||||
{
|
||||
DBUG_PRINT("error",("Got error: %d from getaddrinfo()", error));
|
||||
|
||||
|
@ -3472,8 +3473,9 @@ static void start_signal_handler(void)
|
|||
(void) my_setstacksize(&thr_attr,my_thread_stack_size);
|
||||
|
||||
mysql_mutex_lock(&LOCK_start_thread);
|
||||
if ((error= mysql_thread_create(key_thread_signal_hand,
|
||||
&signal_thread, &thr_attr, signal_hand, 0)))
|
||||
if (unlikely((error= mysql_thread_create(key_thread_signal_hand,
|
||||
&signal_thread, &thr_attr,
|
||||
signal_hand, 0))))
|
||||
{
|
||||
sql_print_error("Can't create interrupt-thread (error %d, errno: %d)",
|
||||
error,errno);
|
||||
|
@ -3575,10 +3577,10 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
|
|||
PSI_CALL_delete_current_thread();
|
||||
#ifdef USE_ONE_SIGNAL_HAND
|
||||
pthread_t tmp;
|
||||
if ((error= mysql_thread_create(0, /* Not instrumented */
|
||||
&tmp, &connection_attrib,
|
||||
kill_server_thread,
|
||||
(void*) &sig)))
|
||||
if (unlikely((error= mysql_thread_create(0, /* Not instrumented */
|
||||
&tmp, &connection_attrib,
|
||||
kill_server_thread,
|
||||
(void*) &sig))))
|
||||
sql_print_error("Can't create thread to kill server (errno= %d)",
|
||||
error);
|
||||
#else
|
||||
|
@ -3670,9 +3672,9 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
|
|||
func= sql_print_error;
|
||||
}
|
||||
|
||||
if (thd)
|
||||
if (likely(thd))
|
||||
{
|
||||
if (MyFlags & ME_FATALERROR)
|
||||
if (unlikely(MyFlags & ME_FATALERROR))
|
||||
thd->is_fatal_error= 1;
|
||||
(void) thd->raise_condition(error, NULL, level, str);
|
||||
}
|
||||
|
@ -3682,7 +3684,7 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
|
|||
/* When simulating OOM, skip writing to error log to avoid mtr errors */
|
||||
DBUG_EXECUTE_IF("simulate_out_of_memory", DBUG_VOID_RETURN;);
|
||||
|
||||
if (!thd || thd->log_all_errors || (MyFlags & ME_NOREFRESH))
|
||||
if (unlikely(!thd) || thd->log_all_errors || (MyFlags & ME_NOREFRESH))
|
||||
(*func)("%s: %s", my_progname_short, str); /* purecov: inspected */
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
@@ -4088,7 +4090,7 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
                        thd->status_var.local_memory_used);
    if (size > 0 &&
        thd->status_var.local_memory_used > (int64)thd->variables.max_mem_used &&
        !thd->killed && !thd->get_stmt_da()->is_set())
        likely(!thd->killed) && !thd->get_stmt_da()->is_set())
    {
      /* Ensure we don't get called here again */
      char buf[50], *buf2;
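(In compound conditions like the one above, only the sub-expression whose outcome is effectively known in advance carries the hint: presumably a session is almost never killed, while the memory-limit comparison can legitimately go either way. A self-contained sketch of that idea, with made-up names and assuming __builtin_expect semantics:)

/* Hypothetical example only; the names do not come from the server sources. */
static bool over_limit_and_alive(long long used, long long limit,
                                 bool killed, bool error_set)
{
  /* Hint only the almost-always-true part; leave the data-dependent
     comparison unhinted. */
  return used > limit && __builtin_expect(!killed, 1) && !error_set;
}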
@ -5364,7 +5366,7 @@ static int init_server_components()
|
|||
init_global_index_stats();
|
||||
|
||||
/* Allow storage engine to give real error messages */
|
||||
if (ha_init_errors())
|
||||
if (unlikely(ha_init_errors()))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
tc_log= 0; // ha_initialize_handlerton() needs that
|
||||
|
@ -5551,7 +5553,7 @@ static int init_server_components()
|
|||
error= mysql_bin_log.open(opt_bin_logname, LOG_BIN, 0, 0,
|
||||
WRITE_CACHE, max_binlog_size, 0, TRUE);
|
||||
mysql_mutex_unlock(log_lock);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
unireg_abort(1);
|
||||
}
|
||||
|
||||
|
@ -5585,7 +5587,7 @@ static int init_server_components()
|
|||
else
|
||||
error= mlockall(MCL_CURRENT);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (global_system_variables.log_warnings)
|
||||
sql_print_warning("Failed to lock memory. Errno: %d\n",errno);
|
||||
|
@ -5617,9 +5619,9 @@ static void create_shutdown_thread()
|
|||
hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name);
|
||||
pthread_t hThread;
|
||||
int error;
|
||||
if ((error= mysql_thread_create(key_thread_handle_shutdown,
|
||||
&hThread, &connection_attrib,
|
||||
handle_shutdown, 0)))
|
||||
if (unlikely((error= mysql_thread_create(key_thread_handle_shutdown,
|
||||
&hThread, &connection_attrib,
|
||||
handle_shutdown, 0))))
|
||||
sql_print_warning("Can't create thread to handle shutdown requests"
|
||||
" (errno= %d)", error);
|
||||
|
||||
|
@ -9551,7 +9553,7 @@ mysql_getopt_value(const char *name, uint length,
|
|||
case OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE:
|
||||
{
|
||||
KEY_CACHE *key_cache;
|
||||
if (!(key_cache= get_or_create_key_cache(name, length)))
|
||||
if (unlikely(!(key_cache= get_or_create_key_cache(name, length))))
|
||||
{
|
||||
if (error)
|
||||
*error= EXIT_OUT_OF_MEMORY;
|
||||
|
|
|
@ -112,12 +112,12 @@ extern void query_cache_insert(void *thd, const char *packet, size_t length,
|
|||
unsigned pkt_nr);
|
||||
#endif // HAVE_QUERY_CACHE
|
||||
#define update_statistics(A) A
|
||||
extern my_bool thd_net_is_killed();
|
||||
extern my_bool thd_net_is_killed(THD *thd);
|
||||
/* Additional instrumentation hooks for the server */
|
||||
#include "mysql_com_server.h"
|
||||
#else
|
||||
#define update_statistics(A)
|
||||
#define thd_net_is_killed() 0
|
||||
#define thd_net_is_killed(A) 0
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -620,7 +620,7 @@ net_real_write(NET *net,const uchar *packet, size_t len)
|
|||
query_cache_insert(net->thd, (char*) packet, len, net->pkt_nr);
|
||||
#endif
|
||||
|
||||
if (net->error == 2)
|
||||
if (unlikely(net->error == 2))
|
||||
DBUG_RETURN(-1); /* socket can't be used */
|
||||
|
||||
net->reading_or_writing=2;
|
||||
|
@ -960,7 +960,7 @@ retry:
|
|||
DBUG_PRINT("info",("vio_read returned %ld errno: %d",
|
||||
(long) length, vio_errno(net->vio)));
|
||||
|
||||
if (i== 0 && thd_net_is_killed())
|
||||
if (i== 0 && unlikely(thd_net_is_killed((THD*) net->thd)))
|
||||
{
|
||||
DBUG_PRINT("info", ("thd is killed"));
|
||||
len= packet_error;
|
||||
|
@ -1246,13 +1246,13 @@ my_net_read_packet_reallen(NET *net, my_bool read_from_server, ulong* reallen)
|
|||
total_length += len;
|
||||
len = my_real_read(net,&complen, 0);
|
||||
} while (len == MAX_PACKET_LENGTH);
|
||||
if (len != packet_error)
|
||||
if (likely(len != packet_error))
|
||||
len+= total_length;
|
||||
net->where_b = save_pos;
|
||||
}
|
||||
|
||||
net->read_pos = net->buff + net->where_b;
|
||||
if (len != packet_error)
|
||||
if (likely(len != packet_error))
|
||||
{
|
||||
net->read_pos[len]=0; /* Safeguard for mysql_use_result */
|
||||
*reallen = (ulong)len;
|
||||
|
|
|
@ -1595,7 +1595,7 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler,
|
|||
selects.
|
||||
*/
|
||||
int error= quick->init_ror_merged_scan(TRUE, local_alloc);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(error);
|
||||
quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS);
|
||||
}
|
||||
|
@ -1619,7 +1619,8 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler,
|
|||
quick->record= head->record[0];
|
||||
}
|
||||
|
||||
if (need_to_fetch_row && head->file->ha_rnd_init_with_error(false))
|
||||
if (need_to_fetch_row &&
|
||||
unlikely(head->file->ha_rnd_init_with_error(false)))
|
||||
{
|
||||
DBUG_PRINT("error", ("ROR index_merge rnd_init call failed"));
|
||||
DBUG_RETURN(1);
|
||||
|
@ -1793,9 +1794,9 @@ int QUICK_ROR_UNION_SELECT::reset()
|
|||
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
|
||||
while ((quick= it++))
|
||||
{
|
||||
if ((error= quick->reset()))
|
||||
if (unlikely((error= quick->reset())))
|
||||
DBUG_RETURN(error);
|
||||
if ((error= quick->get_next()))
|
||||
if (unlikely((error= quick->get_next())))
|
||||
{
|
||||
if (error == HA_ERR_END_OF_FILE)
|
||||
continue;
|
||||
|
@ -1805,12 +1806,12 @@ int QUICK_ROR_UNION_SELECT::reset()
|
|||
queue_insert(&queue, (uchar*)quick);
|
||||
}
|
||||
/* Prepare for ha_rnd_pos calls. */
|
||||
if (head->file->inited && (error= head->file->ha_rnd_end()))
|
||||
if (head->file->inited && unlikely((error= head->file->ha_rnd_end())))
|
||||
{
|
||||
DBUG_PRINT("error", ("ROR index_merge rnd_end call failed"));
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
if ((error= head->file->ha_rnd_init(false)))
|
||||
if (unlikely((error= head->file->ha_rnd_init(false))))
|
||||
{
|
||||
DBUG_PRINT("error", ("ROR index_merge rnd_init call failed"));
|
||||
DBUG_RETURN(error);
|
||||
|
@ -10835,8 +10836,9 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
|
|||
goto err;
|
||||
quick->records= records;
|
||||
|
||||
if ((cp_buffer_from_ref(thd, table, ref) && thd->is_fatal_error) ||
|
||||
!(range= new(alloc) QUICK_RANGE()))
|
||||
if ((cp_buffer_from_ref(thd, table, ref) &&
|
||||
unlikely(thd->is_fatal_error)) ||
|
||||
unlikely(!(range= new(alloc) QUICK_RANGE())))
|
||||
goto err; // out of memory
|
||||
|
||||
range->min_key= range->max_key= ref->key_buff;
|
||||
|
@ -10845,8 +10847,8 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
|
|||
make_prev_keypart_map(ref->key_parts);
|
||||
range->flag= EQ_RANGE;
|
||||
|
||||
if (!(quick->key_parts=key_part=(KEY_PART *)
|
||||
alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts)))
|
||||
if (unlikely(!(quick->key_parts=key_part=(KEY_PART *)
|
||||
alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts))))
|
||||
goto err;
|
||||
|
||||
max_used_key_len=0;
|
||||
|
@ -11178,7 +11180,7 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
|
|||
error= quick->get_next();
|
||||
}
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
/* Save the read key tuple */
|
||||
|
@ -11203,7 +11205,7 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
|
|||
{
|
||||
DBUG_EXECUTE_IF("innodb_quick_report_deadlock",
|
||||
DBUG_SET("+d,innodb_report_deadlock"););
|
||||
if ((error= quick->get_next()))
|
||||
if (unlikely((error= quick->get_next())))
|
||||
{
|
||||
/* On certain errors like deadlock, trx might be rolled back.*/
|
||||
if (!thd->transaction_rollback_request)
|
||||
|
@ -11231,7 +11233,7 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
|
|||
while (!cpk_quick->row_in_ranges())
|
||||
{
|
||||
quick->file->unlock_row(); /* row not in range; unlock */
|
||||
if ((error= quick->get_next()))
|
||||
if (unlikely((error= quick->get_next())))
|
||||
{
|
||||
/* On certain errors like deadlock, trx might be rolled back.*/
|
||||
if (!thd->transaction_rollback_request)
|
||||
|
@ -11363,7 +11365,7 @@ int QUICK_RANGE_SELECT::reset()
|
|||
if (file->inited == handler::RND)
|
||||
{
|
||||
/* Handler could be left in this state by MRR */
|
||||
if ((error= file->ha_rnd_end()))
|
||||
if (unlikely((error= file->ha_rnd_end())))
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
|
@ -11375,7 +11377,7 @@ int QUICK_RANGE_SELECT::reset()
|
|||
{
|
||||
DBUG_EXECUTE_IF("bug14365043_2",
|
||||
DBUG_SET("+d,ha_index_init_fail"););
|
||||
if ((error= file->ha_index_init(index,1)))
|
||||
if (unlikely((error= file->ha_index_init(index,1))))
|
||||
{
|
||||
file->print_error(error, MYF(0));
|
||||
goto err;
|
||||
|
@ -11718,7 +11720,7 @@ int QUICK_SELECT_DESC::get_next()
|
|||
if (last_range->flag & NO_MAX_RANGE) // Read last record
|
||||
{
|
||||
int local_error;
|
||||
if ((local_error= file->ha_index_last(record)))
|
||||
if (unlikely((local_error= file->ha_index_last(record))))
|
||||
DBUG_RETURN(local_error); // Empty table
|
||||
if (cmp_prev(last_range) == 0)
|
||||
DBUG_RETURN(0);
|
||||
|
|
|
@ -4352,7 +4352,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
|
|||
}
|
||||
}
|
||||
|
||||
if (thd->is_fatal_error) // If end of memory
|
||||
if (unlikely(thd->is_fatal_error)) // If end of memory
|
||||
goto err;
|
||||
share->db_record_offset= 1;
|
||||
table->no_rows= 1; // We don't need the data
|
||||
|
@ -4361,10 +4361,11 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
|
|||
recinfo++;
|
||||
if (share->db_type() == TMP_ENGINE_HTON)
|
||||
{
|
||||
if (create_internal_tmp_table(table, keyinfo, start_recinfo, &recinfo, 0))
|
||||
if (unlikely(create_internal_tmp_table(table, keyinfo, start_recinfo,
|
||||
&recinfo, 0)))
|
||||
goto err;
|
||||
}
|
||||
if (open_tmp_table(table))
|
||||
if (unlikely(open_tmp_table(table)))
|
||||
goto err;
|
||||
|
||||
thd->mem_root= mem_root_save;
|
||||
|
@ -4476,7 +4477,7 @@ int SJ_TMP_TABLE::sj_weedout_check_row(THD *thd)
|
|||
}
|
||||
|
||||
error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
/* create_internal_tmp_table_from_heap will generate error if needed */
|
||||
if (!tmp_table->file->is_fatal_error(error, HA_CHECK_DUP))
|
||||
|
@ -5297,7 +5298,8 @@ enum_nested_loop_state join_tab_execution_startup(JOIN_TAB *tab)
|
|||
hash_sj_engine->materialize_join->exec();
|
||||
hash_sj_engine->is_materialized= TRUE;
|
||||
|
||||
if (hash_sj_engine->materialize_join->error || tab->join->thd->is_fatal_error)
|
||||
if (unlikely(hash_sj_engine->materialize_join->error) ||
|
||||
unlikely(tab->join->thd->is_fatal_error))
|
||||
DBUG_RETURN(NESTED_LOOP_ERROR);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -316,7 +316,7 @@ int opt_sum_query(THD *thd,
|
|||
else
|
||||
{
|
||||
error= tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
|
||||
if(error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
tl->table->file->print_error(error, MYF(ME_FATALERROR));
|
||||
DBUG_RETURN(error);
|
||||
|
@ -400,15 +400,16 @@ int opt_sum_query(THD *thd,
|
|||
}
|
||||
longlong info_limit= 1;
|
||||
table->file->info_push(INFO_KIND_FORCE_LIMIT_BEGIN, &info_limit);
|
||||
if (!(error= table->file->ha_index_init((uint) ref.key, 1)))
|
||||
if (likely(!(error= table->file->ha_index_init((uint) ref.key, 1))))
|
||||
error= (is_max ?
|
||||
get_index_max_value(table, &ref, range_fl) :
|
||||
get_index_min_value(table, &ref, item_field, range_fl,
|
||||
prefix_len));
|
||||
|
||||
/* Verify that the read tuple indeed matches the search key */
|
||||
if (!error && reckey_in_range(is_max, &ref, item_field->field,
|
||||
conds, range_fl, prefix_len))
|
||||
if (!error &&
|
||||
reckey_in_range(is_max, &ref, item_field->field,
|
||||
conds, range_fl, prefix_len))
|
||||
error= HA_ERR_KEY_NOT_FOUND;
|
||||
table->file->ha_end_keyread();
|
||||
table->file->ha_index_end();
|
||||
|
@ -478,7 +479,7 @@ int opt_sum_query(THD *thd,
|
|||
}
|
||||
}
|
||||
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
DBUG_RETURN(thd->get_stmt_da()->sql_errno());
|
||||
|
||||
/*
|
||||
|
|
|
@ -483,8 +483,7 @@ frm_error:
|
|||
my_error(ER_FPARSER_BAD_HEADER, MYF(0), file_name->str);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
else
|
||||
DBUG_RETURN(parser); // upper level have to check parser->ok()
|
||||
DBUG_RETURN(parser); // upper level have to check parser->ok()
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -1698,17 +1698,19 @@ bool check_partition_dirs(partition_info *part_info)
|
|||
partition_element *subpart_elem;
|
||||
while ((subpart_elem= sub_it++))
|
||||
{
|
||||
if (error_if_data_home_dir(subpart_elem->data_file_name,
|
||||
"DATA DIRECTORY") ||
|
||||
error_if_data_home_dir(subpart_elem->index_file_name,
|
||||
"INDEX DIRECTORY"))
|
||||
if (unlikely(error_if_data_home_dir(subpart_elem->data_file_name,
|
||||
"DATA DIRECTORY")) ||
|
||||
unlikely(error_if_data_home_dir(subpart_elem->index_file_name,
|
||||
"INDEX DIRECTORY")))
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (error_if_data_home_dir(part_elem->data_file_name, "DATA DIRECTORY") ||
|
||||
error_if_data_home_dir(part_elem->index_file_name, "INDEX DIRECTORY"))
|
||||
if (unlikely(error_if_data_home_dir(part_elem->data_file_name,
|
||||
"DATA DIRECTORY")) ||
|
||||
unlikely(error_if_data_home_dir(part_elem->index_file_name,
|
||||
"INDEX DIRECTORY")))
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -2304,7 +2306,7 @@ bool partition_info::fix_parser_data(THD *thd)
|
|||
part_elem= it++;
|
||||
List_iterator<part_elem_value> list_val_it(part_elem->list_val_list);
|
||||
num_elements= part_elem->list_val_list.elements;
|
||||
if (!num_elements && error_if_requires_values())
|
||||
if (unlikely(!num_elements && error_if_requires_values()))
|
||||
DBUG_RETURN(true);
|
||||
DBUG_ASSERT(part_type == RANGE_PARTITION ?
|
||||
num_elements == 1U : TRUE);
|
||||
|
|
|
@ -285,7 +285,7 @@ net_send_ok(THD *thd,
|
|||
DBUG_ASSERT(store.length() <= MAX_PACKET_LENGTH);
|
||||
|
||||
error= my_net_write(net, (const unsigned char*)store.ptr(), store.length());
|
||||
if (!error && (!skip_flush || is_eof))
|
||||
if (likely(!error) && (!skip_flush || is_eof))
|
||||
error= net_flush(net);
|
||||
|
||||
thd->server_status&= ~SERVER_SESSION_STATE_CHANGED;
|
||||
|
@ -349,7 +349,7 @@ net_send_eof(THD *thd, uint server_status, uint statement_warn_count)
|
|||
{
|
||||
thd->get_stmt_da()->set_overwrite_status(true);
|
||||
error= write_eof_packet(thd, net, server_status, statement_warn_count);
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
error= net_flush(net);
|
||||
thd->get_stmt_da()->set_overwrite_status(false);
|
||||
DBUG_PRINT("info", ("EOF sent, so no more error sending allowed"));
|
||||
|
@ -393,7 +393,7 @@ static bool write_eof_packet(THD *thd, NET *net,
|
|||
because if 'is_fatal_error' is set the server is not going to execute
|
||||
other queries (see the if test in dispatch_command / COM_QUERY)
|
||||
*/
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
server_status&= ~SERVER_MORE_RESULTS_EXISTS;
|
||||
int2store(buff + 3, server_status);
|
||||
error= my_net_write(net, buff, 5);
|
||||
|
@ -590,7 +590,7 @@ void Protocol::end_statement()
|
|||
thd->get_stmt_da()->skip_flush());
|
||||
break;
|
||||
}
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
thd->get_stmt_da()->set_is_sent(true);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
@ -990,7 +990,7 @@ bool Protocol::send_result_set_row(List<Item> *row_items)
|
|||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
/* Item::send() may generate an error. If so, abort the loop. */
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
|
|
|
@ -83,7 +83,7 @@ bool init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
|
|||
|
||||
table->status=0; /* And it's always found */
|
||||
if (!table->file->inited &&
|
||||
(error= table->file->ha_index_init(idx, 1)))
|
||||
unlikely(error= table->file->ha_index_init(idx, 1)))
|
||||
{
|
||||
if (print_error)
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
@ -235,7 +235,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
|
|||
reinit_io_cache(info->io_cache,READ_CACHE,0L,0,0);
|
||||
info->ref_pos=table->file->ref;
|
||||
if (!table->file->inited)
|
||||
if (table->file->ha_rnd_init_with_error(0))
|
||||
if (unlikely(table->file->ha_rnd_init_with_error(0)))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
/*
|
||||
|
@ -272,7 +272,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
|
|||
else if (filesort && filesort->record_pointers)
|
||||
{
|
||||
DBUG_PRINT("info",("using record_pointers"));
|
||||
if (table->file->ha_rnd_init_with_error(0))
|
||||
if (unlikely(table->file->ha_rnd_init_with_error(0)))
|
||||
DBUG_RETURN(1);
|
||||
info->cache_pos= filesort->record_pointers;
|
||||
info->cache_end= (info->cache_pos+
|
||||
|
@ -285,7 +285,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
|
|||
int error;
|
||||
info->read_record_func= rr_index_first;
|
||||
if (!table->file->inited &&
|
||||
(error= table->file->ha_index_init(table->file->keyread, 1)))
|
||||
unlikely((error= table->file->ha_index_init(table->file->keyread, 1))))
|
||||
{
|
||||
if (print_error)
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
@ -296,7 +296,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
|
|||
{
|
||||
DBUG_PRINT("info",("using rr_sequential"));
|
||||
info->read_record_func= rr_sequential;
|
||||
if (table->file->ha_rnd_init_with_error(1))
|
||||
if (unlikely(table->file->ha_rnd_init_with_error(1)))
|
||||
DBUG_RETURN(1);
|
||||
/* We can use record cache if we don't update dynamic length tables */
|
||||
if (!table->no_cache &&
|
||||
|
@ -642,7 +642,7 @@ static int rr_from_cache(READ_RECORD *info)
|
|||
{
|
||||
if (info->cache_pos != info->cache_end)
|
||||
{
|
||||
if (info->cache_pos[info->error_offset])
|
||||
if (unlikely(info->cache_pos[info->error_offset]))
|
||||
{
|
||||
shortget(error,info->cache_pos);
|
||||
if (info->print_error)
|
||||
|
@ -688,7 +688,8 @@ static int rr_from_cache(READ_RECORD *info)
|
|||
record=uint3korr(position);
|
||||
position+=3;
|
||||
record_pos=info->cache+record*info->reclength;
|
||||
if ((error=(int16) info->table->file->ha_rnd_pos(record_pos,info->ref_pos)))
|
||||
if (unlikely((error= (int16) info->table->file->
|
||||
ha_rnd_pos(record_pos,info->ref_pos))))
|
||||
{
|
||||
record_pos[info->error_offset]=1;
|
||||
shortstore(record_pos,error);
|
||||
|
|
|
@ -167,7 +167,7 @@ rpl_slave_state::check_duplicate_gtid(rpl_gtid *gtid, rpl_group_info *rgi)
|
|||
break;
|
||||
}
|
||||
thd= rgi->thd;
|
||||
if (thd->check_killed())
|
||||
if (unlikely(thd->check_killed()))
|
||||
{
|
||||
thd->send_kill_message();
|
||||
res= -1;
|
||||
|
@ -2602,7 +2602,7 @@ gtid_waiting::wait_for_gtid(THD *thd, rpl_gtid *wait_gtid,
|
|||
&stage_master_gtid_wait_primary, &old_stage);
|
||||
do
|
||||
{
|
||||
if (thd->check_killed())
|
||||
if (unlikely(thd->check_killed()))
|
||||
break;
|
||||
else if (wait_until)
|
||||
{
|
||||
|
@ -2654,7 +2654,7 @@ gtid_waiting::wait_for_gtid(THD *thd, rpl_gtid *wait_gtid,
|
|||
&stage_master_gtid_wait, &old_stage);
|
||||
did_enter_cond= true;
|
||||
}
|
||||
while (!elem.done && !thd->check_killed())
|
||||
while (!elem.done && likely(!thd->check_killed()))
|
||||
{
|
||||
thd_wait_begin(thd, THD_WAIT_BINLOG);
|
||||
if (wait_until)
|
||||
|
|
|
@ -105,7 +105,7 @@ int injector::transaction::use_table(server_id_type sid, table tbl)
|
|||
|
||||
int error;
|
||||
|
||||
if ((error= check_state(TABLE_STATE)))
|
||||
if (unlikely((error= check_state(TABLE_STATE))))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
server_id_type save_id= m_thd->variables.server_id;
|
||||
|
@@ -180,7 +180,8 @@ void injector::new_trans(THD *thd, injector::transaction *ptr)
int injector::record_incident(THD *thd, Incident incident)
{
  Incident_log_event ev(thd, incident);
  if (int error= mysql_bin_log.write(&ev))
  int error;
  if (unlikely((error= mysql_bin_log.write(&ev))))
    return error;
  return mysql_bin_log.rotate_and_purge(true);
}

@@ -189,7 +190,8 @@ int injector::record_incident(THD *thd, Incident incident,
                              const LEX_CSTRING *message)
{
  Incident_log_event ev(thd, incident, message);
  if (int error= mysql_bin_log.write(&ev))
  int error;
  if (unlikely((error= mysql_bin_log.write(&ev))))
    return error;
  return mysql_bin_log.rotate_and_purge(true);
}
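(Note the small refactor above: `if (int error= ...)` declares the variable inside the condition, and a declaration is not an expression, so it cannot be passed to a function-like macro such as unlikely(); the commit therefore hoists the declaration out and hints the plain assignment. A self-contained sketch of the two forms, assuming the usual __builtin_expect-based macro:)

#define unlikely(x) __builtin_expect(!!(x), 0)   /* assumed definition */

static int do_work() { return 0; }               /* hypothetical callee */

static int caller()
{
  /* Original style: declaration inside the condition; cannot be hinted,
     because "int error= do_work()" is a declaration, not an expression. */
  if (int error= do_work())
    return error;

  /* Refactored style used by the commit: declare first, then hint the
     assignment expression (note the extra parentheses around it). */
  int error;
  if (unlikely((error= do_work())))
    return error;
  return 0;
}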
|
|
|
@ -678,7 +678,7 @@ file '%s')", fname);
|
|||
mi->rli.is_relay_log_recovery= FALSE;
|
||||
// now change cache READ -> WRITE - must do this before flush_master_info
|
||||
reinit_io_cache(&mi->file, WRITE_CACHE, 0L, 0, 1);
|
||||
if ((error= MY_TEST(flush_master_info(mi, TRUE, TRUE))))
|
||||
if (unlikely((error= MY_TEST(flush_master_info(mi, TRUE, TRUE)))))
|
||||
sql_print_error("Failed to flush master info file");
|
||||
mysql_mutex_unlock(&mi->data_lock);
|
||||
DBUG_RETURN(error);
|
||||
|
@ -1649,7 +1649,7 @@ bool Master_info_index::start_all_slaves(THD *thd)
|
|||
error= start_slave(thd, mi, 1);
|
||||
mi->release();
|
||||
mysql_mutex_lock(&LOCK_active_mi);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
my_error(ER_CANT_START_STOP_SLAVE, MYF(0),
|
||||
"START",
|
||||
|
@ -1722,7 +1722,7 @@ bool Master_info_index::stop_all_slaves(THD *thd)
|
|||
error= stop_slave(thd, mi, 1);
|
||||
mi->release();
|
||||
mysql_mutex_lock(&LOCK_active_mi);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
my_error(ER_CANT_START_STOP_SLAVE, MYF(0),
|
||||
"STOP",
|
||||
|
@ -2021,7 +2021,7 @@ bool Master_info_index::flush_all_relay_logs()
|
|||
mi->release();
|
||||
mysql_mutex_lock(&LOCK_active_mi);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
result= true;
|
||||
break;
|
||||
|
|
|
@ -337,7 +337,7 @@ do_gco_wait(rpl_group_info *rgi, group_commit_orderer *gco,
|
|||
thd->set_time_for_next_stage();
|
||||
do
|
||||
{
|
||||
if (thd->check_killed() && !rgi->worker_error)
|
||||
if (unlikely(thd->check_killed()) && !rgi->worker_error)
|
||||
{
|
||||
DEBUG_SYNC(thd, "rpl_parallel_start_waiting_for_prior_killed");
|
||||
thd->clear_error();
|
||||
|
@ -402,7 +402,7 @@ do_ftwrl_wait(rpl_group_info *rgi,
|
|||
{
|
||||
if (entry->force_abort || rgi->worker_error)
|
||||
break;
|
||||
if (thd->check_killed())
|
||||
if (unlikely(thd->check_killed()))
|
||||
{
|
||||
thd->send_kill_message();
|
||||
slave_output_error_info(rgi, thd);
|
||||
|
@ -453,7 +453,7 @@ pool_mark_busy(rpl_parallel_thread_pool *pool, THD *thd)
|
|||
}
|
||||
while (pool->busy)
|
||||
{
|
||||
if (thd && thd->check_killed())
|
||||
if (thd && unlikely(thd->check_killed()))
|
||||
{
|
||||
thd->send_kill_message();
|
||||
res= 1;
|
||||
|
@ -571,7 +571,7 @@ rpl_pause_for_ftwrl(THD *thd)
|
|||
e->last_committed_sub_id < e->pause_sub_id &&
|
||||
!err)
|
||||
{
|
||||
if (thd->check_killed())
|
||||
if (unlikely(thd->check_killed()))
|
||||
{
|
||||
thd->send_kill_message();
|
||||
err= 1;
|
||||
|
@ -838,7 +838,7 @@ do_retry:
|
|||
}
|
||||
DBUG_EXECUTE_IF("inject_mdev8031", {
|
||||
/* Simulate pending KILL caught in read_relay_log_description_event(). */
|
||||
if (thd->check_killed()) {
|
||||
if (unlikely(thd->check_killed())) {
|
||||
thd->send_kill_message();
|
||||
err= 1;
|
||||
goto err;
|
||||
|
@ -862,13 +862,13 @@ do_retry:
|
|||
|
||||
if (ev)
|
||||
break;
|
||||
if (rlog.error < 0)
|
||||
if (unlikely(rlog.error < 0))
|
||||
{
|
||||
errmsg= "slave SQL thread aborted because of I/O error";
|
||||
err= 1;
|
||||
goto check_retry;
|
||||
}
|
||||
if (rlog.error > 0)
|
||||
if (unlikely(rlog.error > 0))
|
||||
{
|
||||
sql_print_error("Slave SQL thread: I/O error reading "
|
||||
"event(errno: %d cur_log->error: %d)",
|
||||
|
@ -1288,7 +1288,7 @@ handle_rpl_parallel_thread(void *arg)
|
|||
if (!err)
|
||||
#endif
|
||||
{
|
||||
if (thd->check_killed())
|
||||
if (unlikely(thd->check_killed()))
|
||||
{
|
||||
thd->clear_error();
|
||||
thd->get_stmt_da()->reset_diagnostics_area();
|
||||
|
@ -1301,7 +1301,7 @@ handle_rpl_parallel_thread(void *arg)
|
|||
delete_or_keep_event_post_apply(rgi, event_type, qev->ev);
|
||||
DBUG_EXECUTE_IF("rpl_parallel_simulate_temp_err_gtid_0_x_100",
|
||||
err= dbug_simulate_tmp_error(rgi, thd););
|
||||
if (err)
|
||||
if (unlikely(err))
|
||||
{
|
||||
convert_kill_to_deadlock_error(rgi);
|
||||
if (has_temporary_error(thd) && slave_trans_retries > 0)
|
||||
|
@ -2075,7 +2075,7 @@ rpl_parallel_entry::choose_thread(rpl_group_info *rgi, bool *did_enter_cond,
|
|||
/* The thread is ready to queue into. */
|
||||
break;
|
||||
}
|
||||
else if (rli->sql_driver_thd->check_killed())
|
||||
else if (unlikely(rli->sql_driver_thd->check_killed()))
|
||||
{
|
||||
unlock_or_exit_cond(rli->sql_driver_thd, &thr->LOCK_rpl_thread,
|
||||
did_enter_cond, old_stage);
|
||||
|
@ -2401,7 +2401,7 @@ rpl_parallel::wait_for_workers_idle(THD *thd)
|
|||
&stage_waiting_for_workers_idle, &old_stage);
|
||||
while (e->current_sub_id > e->last_committed_sub_id)
|
||||
{
|
||||
if (thd->check_killed())
|
||||
if (unlikely(thd->check_killed()))
|
||||
{
|
||||
thd->send_kill_message();
|
||||
err= 1;
|
||||
|
|
|
@ -196,6 +196,7 @@ unpack_row(rpl_group_info *rgi,
|
|||
uchar const **const current_row_end, ulong *const master_reclength,
|
||||
uchar const *const row_end)
|
||||
{
|
||||
int error;
|
||||
DBUG_ENTER("unpack_row");
|
||||
DBUG_ASSERT(row_data);
|
||||
DBUG_ASSERT(table);
|
||||
|
@ -419,7 +420,7 @@ unpack_row(rpl_group_info *rgi,
|
|||
/*
|
||||
Add Extra slave persistent columns
|
||||
*/
|
||||
if (int error= fill_extra_persistent_columns(table, cols->n_bits))
|
||||
if (unlikely(error= fill_extra_persistent_columns(table, cols->n_bits)))
|
||||
DBUG_RETURN(error);
|
||||
|
||||
/*
|
||||
|
|
|
@ -142,7 +142,7 @@ int Relay_log_info::init(const char* info_fname)
|
|||
log_space_limit= relay_log_space_limit;
|
||||
log_space_total= 0;
|
||||
|
||||
if (error_on_rli_init_info)
|
||||
if (unlikely(error_on_rli_init_info))
|
||||
goto err;
|
||||
|
||||
char pattern[FN_REFLEN];
|
||||
|
@ -306,7 +306,7 @@ Failed to open the existing relay log info file '%s' (errno %d)",
|
|||
fname);
|
||||
error= 1;
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (info_fd >= 0)
|
||||
mysql_file_close(info_fd, MYF(0));
|
||||
|
@ -415,7 +415,7 @@ Failed to open the existing relay log info file '%s' (errno %d)",
|
|||
before Relay_log_info::flush()
|
||||
*/
|
||||
reinit_io_cache(&info_file, WRITE_CACHE,0L,0,1);
|
||||
if ((error= flush()))
|
||||
if (unlikely((error= flush())))
|
||||
{
|
||||
msg= "Failed to flush relay log info file";
|
||||
goto err;
|
||||
|
@ -2206,7 +2206,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
|
|||
to rollback before continuing with the next events.
|
||||
4) so we need this "context cleanup" function.
|
||||
*/
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
trans_rollback_stmt(thd); // if a "statement transaction"
|
||||
/* trans_rollback() also resets OPTION_GTID_BEGIN */
|
||||
|
@ -2220,7 +2220,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
|
|||
m_table_map.clear_tables();
|
||||
slave_close_thread_tables(thd);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
thd->mdl_context.release_transactional_locks();
|
||||
|
||||
|
|
|
@@ -779,7 +779,6 @@ void Repl_semi_sync_master::dump_end(THD* thd)
int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name,
                                      my_off_t trx_wait_binlog_pos)
{

  DBUG_ENTER("Repl_semi_sync_master::commit_trx");

  if (get_master_enabled() && trx_wait_binlog_name)

@@ -788,15 +787,16 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name,
    struct timespec abstime;
    int wait_result;
    PSI_stage_info old_stage;
    THD *thd= current_thd;

    set_timespec(start_ts, 0);

    DEBUG_SYNC(current_thd, "rpl_semisync_master_commit_trx_before_lock");
    DEBUG_SYNC(thd, "rpl_semisync_master_commit_trx_before_lock");
    /* Acquire the mutex. */
    lock();

    /* This must be called after acquired the lock */
    THD_ENTER_COND(NULL, &COND_binlog_send, &LOCK_binlog,
    THD_ENTER_COND(thd, &COND_binlog_send, &LOCK_binlog,
                   & stage_waiting_for_semi_sync_ack_from_slave,
                   & old_stage);

@@ -809,7 +809,7 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name,
                      trx_wait_binlog_name, (ulong)trx_wait_binlog_pos,
                      (int)is_on()));

    while (is_on() && !thd_killed(current_thd))
    while (is_on() && !thd_killed(thd))
    {
      if (m_reply_file_name_inited)
      {
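(The change above is not an unlikely() hint but the same micro-optimization in spirit: current_thd is looked up once, cached in a local, and reused by DEBUG_SYNC, THD_ENTER_COND, the thd_killed() wait loop and the later THD_EXIT_COND, instead of repeating the thread-local lookup. A generic, self-contained sketch of the pattern, with stand-in names only:)

#include <atomic>

/* Stand-ins for the server's per-thread session lookup; illustrative only. */
struct Session { std::atomic<bool> killed{false}; };
static thread_local Session current_session;

static Session *get_current_session() { return &current_session; }

static int wait_for_ack(int max_spins)
{
  Session *s= get_current_session();  /* one lookup, like THD *thd= current_thd */
  for (int i= 0; i < max_spins; i++)
  {
    if (s->killed.load())             /* reuse the cached pointer in the loop */
      return 1;                       /* stopped because the session was killed */
  }
  return 0;                           /* gave up waiting */
}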
@ -924,7 +924,7 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name,
|
|||
m_active_tranxs may be NULL if someone disabled semi sync during
|
||||
cond_timewait()
|
||||
*/
|
||||
assert(thd_killed(current_thd) || !m_active_tranxs ||
|
||||
assert(thd_killed(thd) || !m_active_tranxs ||
|
||||
!m_active_tranxs->is_tranx_end_pos(trx_wait_binlog_name,
|
||||
trx_wait_binlog_pos));
|
||||
|
||||
|
@ -937,7 +937,7 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name,
|
|||
|
||||
/* The lock held will be released by thd_exit_cond, so no need to
|
||||
call unlock() here */
|
||||
THD_EXIT_COND(NULL, & old_stage);
|
||||
THD_EXIT_COND(thd, &old_stage);
|
||||
}
|
||||
|
||||
DBUG_RETURN(0);
|
||||
|
|
|
@ -245,10 +245,10 @@ uchar *sys_var::global_value_ptr(THD *thd, const LEX_CSTRING *base)
|
|||
|
||||
bool sys_var::check(THD *thd, set_var *var)
|
||||
{
|
||||
if ((var->value && do_check(thd, var))
|
||||
|| (on_check && on_check(this, thd, var)))
|
||||
if (unlikely((var->value && do_check(thd, var)) ||
|
||||
(on_check && on_check(this, thd, var))))
|
||||
{
|
||||
if (!thd->is_error())
|
||||
if (likely(!thd->is_error()))
|
||||
{
|
||||
char buff[STRING_BUFFER_USUAL_SIZE];
|
||||
String str(buff, sizeof(buff), system_charset_info), *res;
|
||||
|
@ -718,10 +718,10 @@ int sql_set_variables(THD *thd, List<set_var_base> *var_list, bool free)
|
|||
set_var_base *var;
|
||||
while ((var=it++))
|
||||
{
|
||||
if ((error= var->check(thd)))
|
||||
if (unlikely((error= var->check(thd))))
|
||||
goto err;
|
||||
}
|
||||
if (was_error || !(error= MY_TEST(thd->is_error())))
|
||||
if (unlikely(was_error) || likely(!(error= MY_TEST(thd->is_error()))))
|
||||
{
|
||||
it.rewind();
|
||||
while ((var= it++))
|
||||
|
|
sql/slave.cc
|
@ -344,7 +344,7 @@ gtid_pos_table_creation(THD *thd, plugin_ref engine, LEX_CSTRING *table_name)
|
|||
goto end;
|
||||
mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
|
||||
FALSE, FALSE);
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
err= 1;
|
||||
end:
|
||||
thd->variables.option_bits= thd_saved_option;
|
||||
|
@ -739,7 +739,7 @@ int init_slave()
|
|||
|
||||
thd->reset_globals();
|
||||
delete thd;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
sql_print_error("Failed to create slave threads");
|
||||
goto err;
|
||||
|
@ -885,7 +885,7 @@ bool init_slave_skip_errors(const char* arg)
|
|||
if (!arg || !*arg) // No errors defined
|
||||
goto end;
|
||||
|
||||
if (my_bitmap_init(&slave_error_mask,0,MAX_SLAVE_ERROR,0))
|
||||
if (unlikely(my_bitmap_init(&slave_error_mask,0,MAX_SLAVE_ERROR,0)))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
use_slave_mask= 1;
|
||||
|
@ -978,10 +978,10 @@ bool init_slave_transaction_retry_errors(const char* arg)
|
|||
p++;
|
||||
}
|
||||
|
||||
if (!(slave_transaction_retry_errors=
|
||||
(uint *) my_once_alloc(sizeof(int) *
|
||||
slave_transaction_retry_error_length,
|
||||
MYF(MY_WME))))
|
||||
if (unlikely(!(slave_transaction_retry_errors=
|
||||
(uint *) my_once_alloc(sizeof(int) *
|
||||
slave_transaction_retry_error_length,
|
||||
MYF(MY_WME)))))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
/*
|
||||
|
@ -1030,11 +1030,12 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock)
|
|||
}
|
||||
else
|
||||
mi->rli.abort_slave=1;
|
||||
if ((error=terminate_slave_thread(mi->rli.sql_driver_thd, sql_lock,
|
||||
&mi->rli.stop_cond,
|
||||
&mi->rli.slave_running,
|
||||
skip_lock)) &&
|
||||
!force_all)
|
||||
if (unlikely((error= terminate_slave_thread(mi->rli.sql_driver_thd,
|
||||
sql_lock,
|
||||
&mi->rli.stop_cond,
|
||||
&mi->rli.slave_running,
|
||||
skip_lock))) &&
|
||||
!force_all)
|
||||
DBUG_RETURN(error);
|
||||
retval= error;
|
||||
|
||||
|
@ -1052,11 +1053,11 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock)
|
|||
{
|
||||
DBUG_PRINT("info",("Terminating IO thread"));
|
||||
mi->abort_slave=1;
|
||||
if ((error=terminate_slave_thread(mi->io_thd, io_lock,
|
||||
&mi->stop_cond,
|
||||
&mi->slave_running,
|
||||
skip_lock)) &&
|
||||
!force_all)
|
||||
if (unlikely((error= terminate_slave_thread(mi->io_thd, io_lock,
|
||||
&mi->stop_cond,
|
||||
&mi->slave_running,
|
||||
skip_lock))) &&
|
||||
!force_all)
|
||||
DBUG_RETURN(error);
|
||||
if (!retval)
|
||||
retval= error;
|
||||
|
@ -1232,8 +1233,9 @@ int start_slave_thread(
|
|||
}
|
||||
start_id= *slave_run_id;
|
||||
DBUG_PRINT("info",("Creating new slave thread"));
|
||||
if ((error = mysql_thread_create(thread_key,
|
||||
&th, &connection_attrib, h_func, (void*)mi)))
|
||||
if (unlikely((error= mysql_thread_create(thread_key,
|
||||
&th, &connection_attrib, h_func,
|
||||
(void*)mi))))
|
||||
{
|
||||
sql_print_error("Can't create slave thread (errno= %d).", error);
|
||||
if (start_lock)
|
||||
|
@ -1346,7 +1348,7 @@ int start_slave_threads(THD *thd,
|
|||
mi->rli.restart_gtid_pos.reset();
|
||||
}
|
||||
|
||||
if (!error && (thread_mask & SLAVE_IO))
|
||||
if (likely(!error) && likely((thread_mask & SLAVE_IO)))
|
||||
error= start_slave_thread(
|
||||
#ifdef HAVE_PSI_INTERFACE
|
||||
key_thread_slave_io,
|
||||
|
@ -1355,7 +1357,7 @@ int start_slave_threads(THD *thd,
|
|||
cond_io,
|
||||
&mi->slave_running, &mi->slave_run_id,
|
||||
mi);
|
||||
if (!error && (thread_mask & SLAVE_SQL))
|
||||
if (likely(!error) && likely(thread_mask & SLAVE_SQL))
|
||||
{
|
||||
error= start_slave_thread(
|
||||
#ifdef HAVE_PSI_INTERFACE
|
||||
|
@ -1365,7 +1367,7 @@ int start_slave_threads(THD *thd,
|
|||
cond_sql,
|
||||
&mi->rli.slave_running, &mi->rli.slave_run_id,
|
||||
mi);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
terminate_slave_threads(mi, thread_mask & SLAVE_IO, !need_slave_mutex);
|
||||
}
|
||||
DBUG_RETURN(error);
|
||||
|
@ -2337,7 +2339,8 @@ past_checksum:
|
|||
*/
|
||||
if (opt_replicate_events_marked_for_skip == RPL_SKIP_FILTER_ON_MASTER)
|
||||
{
|
||||
if (mysql_real_query(mysql, STRING_WITH_LEN("SET skip_replication=1")))
|
||||
if (unlikely(mysql_real_query(mysql,
|
||||
STRING_WITH_LEN("SET skip_replication=1"))))
|
||||
{
|
||||
err_code= mysql_errno(mysql);
|
||||
if (is_network_error(err_code))
|
||||
|
@ -2381,7 +2384,7 @@ past_checksum:
|
|||
STRINGIFY_ARG(MARIA_SLAVE_CAPABILITY_ANNOTATE))),
|
||||
mysql_real_query(mysql, STRING_WITH_LEN("SET @mariadb_slave_capability="
|
||||
STRINGIFY_ARG(MARIA_SLAVE_CAPABILITY_MINE))));
|
||||
if (rc)
|
||||
if (unlikely(rc))
|
||||
{
|
||||
err_code= mysql_errno(mysql);
|
||||
if (is_network_error(err_code))
|
||||
|
@ -2457,7 +2460,7 @@ after_set_capability:
|
|||
query_str.append(STRING_WITH_LEN("'"), system_charset_info);
|
||||
|
||||
rc= mysql_real_query(mysql, query_str.ptr(), query_str.length());
|
||||
if (rc)
|
||||
if (unlikely(rc))
|
||||
{
|
||||
err_code= mysql_errno(mysql);
|
||||
if (is_network_error(err_code))
|
||||
|
@ -2490,7 +2493,7 @@ after_set_capability:
|
|||
}
|
||||
|
||||
rc= mysql_real_query(mysql, query_str.ptr(), query_str.length());
|
||||
if (rc)
|
||||
if (unlikely(rc))
|
||||
{
|
||||
err_code= mysql_errno(mysql);
|
||||
if (is_network_error(err_code))
|
||||
|
@ -2523,7 +2526,7 @@ after_set_capability:
|
|||
}
|
||||
|
||||
rc= mysql_real_query(mysql, query_str.ptr(), query_str.length());
|
||||
if (rc)
|
||||
if (unlikely(rc))
|
||||
{
|
||||
err_code= mysql_errno(mysql);
|
||||
if (is_network_error(err_code))
|
||||
|
@ -2559,7 +2562,7 @@ after_set_capability:
|
|||
query_str.append(STRING_WITH_LEN("'"), system_charset_info);
|
||||
|
||||
rc= mysql_real_query(mysql, query_str.ptr(), query_str.length());
|
||||
if (rc)
|
||||
if (unlikely(rc))
|
||||
{
|
||||
err_code= mysql_errno(mysql);
|
||||
if (is_network_error(err_code))
|
||||
|
@ -3674,7 +3677,7 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings,
|
|||
#endif
len = cli_safe_read_reallen(mysql, network_read_len);
if (len == packet_error || (long) len < 1)
if (unlikely(len == packet_error || (long) len < 1))
{
if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED)
{

@@ -3739,7 +3742,7 @@ has_temporary_error(THD *thd)
error or not. This is currently the case for Incident_log_event,
which sets no message. Return FALSE.
*/
if (!thd->is_error())
if (!likely(thd->is_error()))
DBUG_RETURN(0);
current_errno= thd->get_stmt_da()->sql_errno();

@@ -3960,7 +3963,7 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi,
TODO: Replace this with a decent error message when merged
with BUG#24954 (which adds several new error message).
*/
if (error)
if (unlikely(error))
{
rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, rgi->gtid_info(),
"It was not possible to update the positions"

@@ -4351,7 +4354,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
update_log_pos failed: this should not happen, so we don't
retry.
*/
if (exec_res == 2)
if (unlikely(exec_res == 2))
DBUG_RETURN(1);
#ifdef WITH_WSREP

@@ -4363,7 +4366,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
if (slave_trans_retries)
{
int UNINIT_VAR(temp_err);
if (exec_res && (temp_err= has_temporary_error(thd)))
if (unlikely(exec_res) && (temp_err= has_temporary_error(thd)))
{
const char *errmsg;
rli->clear_error();

@@ -4800,7 +4803,7 @@ connected:
if (check_io_slave_killed(mi, NullS))
goto err;
if (event_len == packet_error)
if (unlikely(event_len == packet_error))
{
uint mysql_error_number= mysql_errno(mysql);
switch (mysql_error_number) {

@@ -5103,7 +5106,7 @@ slave_output_error_info(rpl_group_info *rgi, THD *thd)
Relay_log_info *rli= rgi->rli;
uint32 const last_errno= rli->last_error().number;
if (thd->is_error())
if (unlikely(thd->is_error()))
{
char const *const errmsg= thd->get_stmt_da()->message();

@@ -5147,7 +5150,7 @@ slave_output_error_info(rpl_group_info *rgi, THD *thd)
udf_error = true;
sql_print_warning("Slave: %s Error_code: %d", err->get_message_text(), err->get_sql_errno());
}
if (udf_error)
if (unlikely(udf_error))
{
StringBuffer<100> tmp;
if (rli->mi->using_gtid != Master_info::USE_GTID_NO)

@@ -5427,7 +5430,7 @@ pthread_handler_t handle_slave_sql(void *arg)
if (opt_init_slave.length)
{
execute_init_command(thd, &opt_init_slave, &LOCK_sys_init_slave);
if (thd->is_slave_error)
if (unlikely(thd->is_slave_error))
{
rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL,
"Slave SQL thread aborted. Can't execute init_slave query");

@@ -6932,7 +6935,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
}
mysql_mutex_unlock(log_lock);
if (!error &&
if (likely(!error) &&
mi->using_gtid != Master_info::USE_GTID_NO &&
mi->events_queued_since_last_gtid > 0 &&
( (mi->last_queued_gtid_standalone &&

@@ -6981,11 +6984,11 @@ err:
Do not print ER_SLAVE_RELAY_LOG_WRITE_FAILURE error here, as the caller
handle_slave_io() prints it on return.
*/
if (error && error != ER_SLAVE_RELAY_LOG_WRITE_FAILURE)
if (unlikely(error) && error != ER_SLAVE_RELAY_LOG_WRITE_FAILURE)
mi->report(ERROR_LEVEL, error, NULL, ER_DEFAULT(error),
error_msg.ptr());
if(is_malloc)
if (unlikely(is_malloc))
my_free((void *)new_buf);
DBUG_RETURN(error);

@@ -7452,7 +7455,7 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
}
if (opt_reckless_slave) // For mysql-test
cur_log->error = 0;
if (cur_log->error < 0)
if (unlikely(cur_log->error < 0))
{
errmsg = "slave SQL thread aborted because of I/O error";
if (hot_log)
@@ -2275,7 +2275,7 @@ bool sp_add_used_routine(Query_tables_list *prelocking_ctx, Query_arena *arena,
{
Sroutine_hash_entry *rn=
(Sroutine_hash_entry *)arena->alloc(sizeof(Sroutine_hash_entry));
if (!rn) // OOM. Error will be reported using fatal_error().
if (unlikely(!rn)) // OOM. Error will be reported using fatal_error().
return FALSE;
rn->mdl_request.init(key, MDL_SHARED, MDL_TRANSACTION);
if (my_hash_insert(&prelocking_ctx->sroutines, (uchar *)rn))

@@ -2787,7 +2787,7 @@ int Sp_handler::sp_cache_routine(THD *thd,
an error with it's return value without calling my_error(), we
set the generic "mysql.proc table corrupt" error here.
*/
if (! thd->is_error())
if (!thd->is_error())
{
my_error(ER_SP_PROC_TABLE_CORRUPT, MYF(0),
ErrConvDQName(name).ptr(), ret);
@@ -1377,7 +1377,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
errors are not catchable by SQL handlers) or the connection has been
killed during execution.
*/
if (!thd->is_fatal_error && !thd->killed_errno() &&
if (likely(!thd->is_fatal_error) && likely(!thd->killed_errno()) &&
ctx->handle_sql_condition(thd, &ip, i))
{
err_status= FALSE;

@@ -1386,7 +1386,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
/* Reset sp_rcontext::end_partial_result_set flag. */
ctx->end_partial_result_set= FALSE;
} while (!err_status && !thd->killed && !thd->is_fatal_error &&
} while (!err_status && likely(!thd->killed) &&
likely(!thd->is_fatal_error) &&
!thd->spcont->pause_state);
#if defined(ENABLED_PROFILING)

@@ -3311,7 +3312,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
if (open_tables)
res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables);
if (!res)
if (likely(!res))
{
res= instr->exec_core(thd, nextp);
DBUG_PRINT("info",("exec_core returned: %d", res));

@@ -3371,7 +3372,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
Update the state of the active arena if no errors on
open_tables stage.
*/
if (!res || !thd->is_error() ||
if (likely(!res) || likely(!thd->is_error()) ||
(thd->get_stmt_da()->sql_errno() != ER_CANT_REOPEN_TABLE &&
thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE &&
thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE_IN_ENGINE &&

@@ -3542,7 +3543,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
thd->set_query(query_backup);
thd->query_name_consts= 0;
if (!thd->is_error())
if (likely(!thd->is_error()))
{
res= 0;
thd->get_stmt_da()->reset_diagnostics_area();
@@ -234,7 +234,7 @@ bool Qualified_column_ident::resolve_type_ref(THD *thd, Column_definition *def)
!open_tables_only_view_structure(thd, table_list,
thd->mdl_context.has_locks()))
{
if ((src= lex.query_tables->table->find_field_by_name(&m_column)))
if (likely((src= lex.query_tables->table->find_field_by_name(&m_column))))
{
if (!(rc= check_column_grant_for_type_ref(thd, table_list,
m_column.str,

@@ -486,14 +486,14 @@ bool sp_rcontext::handle_sql_condition(THD *thd,
handlers from this context are applicable: try to locate one
in the outer scope.
*/
if (thd->is_fatal_sub_stmt_error && m_in_sub_stmt)
if (unlikely(thd->is_fatal_sub_stmt_error) && m_in_sub_stmt)
DBUG_RETURN(false);
Diagnostics_area *da= thd->get_stmt_da();
const sp_handler *found_handler= NULL;
const Sql_condition *found_condition= NULL;
if (thd->is_error())
if (unlikely(thd->is_error()))
{
found_handler=
cur_spi->m_ctx->find_handler(da->get_error_condition_identity());
@@ -406,7 +406,7 @@ Geometry *Geometry::create_from_json(Geometry_buffer *buffer,
key_buf[key_len++]= (uchar)je->s.c_next | 0x20; /* make it lowercase. */
}
if (je->s.error)
if (unlikely(je->s.error))
goto err_return;
if (key_len == type_keyname_len &&

@@ -1956,6 +1956,7 @@ bool Gis_multi_point::init_from_json(json_engine_t *je, bool er_on_3D,
if (je->s.error)
return TRUE;
if (n_points == 0)
{
je->s.error= Geometry::GEOJ_EMPTY_COORDINATES;

@@ -2231,6 +2232,7 @@ bool Gis_multi_line_string::init_from_json(json_engine_t *je, bool er_on_3D,
n_line_strings++;
}
if (je->s.error)
return TRUE;

@@ -2629,8 +2631,10 @@ bool Gis_multi_polygon::init_from_json(json_engine_t *je, bool er_on_3D,
n_polygons++;
}
if (je->s.error)
return TRUE;
if (n_polygons == 0)
{
je->s.error= Geometry::GEOJ_EMPTY_COORDINATES;

118 sql/sql_acl.cc
@ -2224,7 +2224,7 @@ bool acl_reload(THD *thd)
|
|||
To avoid deadlocks we should obtain table locks before
|
||||
obtaining acl_cache->lock mutex.
|
||||
*/
|
||||
if ((result= tables.open_and_lock(thd)))
|
||||
if (unlikely((result= tables.open_and_lock(thd))))
|
||||
{
|
||||
DBUG_ASSERT(result <= 0);
|
||||
/*
|
||||
|
@ -3470,7 +3470,8 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
|
|||
user_table.default_role()->store(acl_user->default_rolename.str,
|
||||
acl_user->default_rolename.length,
|
||||
system_charset_info);
|
||||
if ((error=table->file->ha_update_row(table->record[1],table->record[0])) &&
|
||||
if (unlikely(error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
{
|
||||
mysql_mutex_unlock(&acl_cache->lock);
|
||||
|
@ -3829,7 +3830,8 @@ static bool update_user_table(THD *thd, const User_table& user_table,
|
|||
new_password_len);
|
||||
|
||||
|
||||
if ((error=table->file->ha_update_row(table->record[1],table->record[0])) &&
|
||||
if (unlikely(error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
{
|
||||
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
|
||||
|
@ -4121,8 +4123,8 @@ static int replace_user_table(THD *thd, const User_table &user_table,
|
|||
*/
|
||||
if (cmp_record(table, record[1]))
|
||||
{
|
||||
if ((error=
|
||||
table->file->ha_update_row(table->record[1],table->record[0])) &&
|
||||
if (unlikely(error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
{ // This should never happen
|
||||
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
|
||||
|
@ -4133,8 +4135,9 @@ static int replace_user_table(THD *thd, const User_table &user_table,
|
|||
error= 0;
|
||||
}
|
||||
}
|
||||
else if ((error=table->file->ha_write_row(table->record[0]))) // insert
|
||||
{ // This should never happen
|
||||
else if (unlikely(error=table->file->ha_write_row(table->record[0])))
|
||||
{
|
||||
// This should never happen
|
||||
if (table->file->is_fatal_error(error, HA_CHECK_DUP))
|
||||
{
|
||||
table->file->print_error(error,MYF(0)); /* purecov: deadcode */
|
||||
|
@ -4145,7 +4148,7 @@ static int replace_user_table(THD *thd, const User_table &user_table,
|
|||
error=0; // Privileges granted / revoked
|
||||
|
||||
end:
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
acl_cache->clear(1); // Clear privilege cache
|
||||
if (old_row_exists)
|
||||
|
@ -4259,18 +4262,19 @@ static int replace_db_table(TABLE *table, const char *db,
|
|||
/* update old existing row */
|
||||
if (rights)
|
||||
{
|
||||
if ((error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely((error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0]))) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
goto table_error; /* purecov: deadcode */
|
||||
}
|
||||
else /* must have been a revoke of all privileges */
|
||||
{
|
||||
if ((error= table->file->ha_delete_row(table->record[1])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
|
||||
goto table_error; /* purecov: deadcode */
|
||||
}
|
||||
}
|
||||
else if (rights && (error= table->file->ha_write_row(table->record[0])))
|
||||
else if (rights &&
|
||||
(unlikely(error= table->file->ha_write_row(table->record[0]))))
|
||||
{
|
||||
if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
|
||||
goto table_error; /* purecov: deadcode */
|
||||
|
@ -4347,7 +4351,7 @@ replace_roles_mapping_table(TABLE *table, LEX_CSTRING *user, LEX_CSTRING *host,
|
|||
}
|
||||
if (revoke_grant && !with_admin)
|
||||
{
|
||||
if ((error= table->file->ha_delete_row(table->record[1])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
|
||||
{
|
||||
DBUG_PRINT("info", ("error deleting row '%s' '%s' '%s'",
|
||||
host->str, user->str, role->str));
|
||||
|
@ -4358,7 +4362,8 @@ replace_roles_mapping_table(TABLE *table, LEX_CSTRING *user, LEX_CSTRING *host,
|
|||
{
|
||||
table->field[3]->store(!revoke_grant + 1);
|
||||
|
||||
if ((error= table->file->ha_update_row(table->record[1], table->record[0])))
|
||||
if (unlikely((error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0]))))
|
||||
{
|
||||
DBUG_PRINT("info", ("error updating row '%s' '%s' '%s'",
|
||||
host->str, user->str, role->str));
|
||||
|
@ -4370,7 +4375,7 @@ replace_roles_mapping_table(TABLE *table, LEX_CSTRING *user, LEX_CSTRING *host,
|
|||
|
||||
table->field[3]->store(with_admin + 1);
|
||||
|
||||
if ((error= table->file->ha_write_row(table->record[0])))
|
||||
if (unlikely((error= table->file->ha_write_row(table->record[0]))))
|
||||
{
|
||||
DBUG_PRINT("info", ("error inserting row '%s' '%s' '%s'",
|
||||
host->str, user->str, role->str));
|
||||
|
@ -4501,7 +4506,7 @@ replace_proxies_priv_table(THD *thd, TABLE *table, const LEX_USER *user,
|
|||
|
||||
get_grantor(thd, grantor);
|
||||
|
||||
if ((error= table->file->ha_index_init(0, 1)))
|
||||
if (unlikely((error= table->file->ha_index_init(0, 1))))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
DBUG_PRINT("info", ("ha_index_init error"));
|
||||
|
@ -4538,18 +4543,18 @@ replace_proxies_priv_table(THD *thd, TABLE *table, const LEX_USER *user,
|
|||
/* update old existing row */
|
||||
if (!revoke_grant)
|
||||
{
|
||||
if ((error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely(error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
goto table_error; /* purecov: inspected */
|
||||
}
|
||||
else
|
||||
{
|
||||
if ((error= table->file->ha_delete_row(table->record[1])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
|
||||
goto table_error; /* purecov: inspected */
|
||||
}
|
||||
}
|
||||
else if ((error= table->file->ha_write_row(table->record[0])))
|
||||
else if (unlikely((error= table->file->ha_write_row(table->record[0]))))
|
||||
{
|
||||
DBUG_PRINT("info", ("error inserting the row"));
|
||||
if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
|
||||
|
@ -4952,7 +4957,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
List_iterator <LEX_COLUMN> iter(columns);
|
||||
class LEX_COLUMN *column;
|
||||
int error= table->file->ha_index_init(0, 1);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
DBUG_RETURN(-1);
|
||||
|
@ -5012,7 +5017,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
error=table->file->ha_update_row(table->record[1],table->record[0]);
|
||||
else
|
||||
error=table->file->ha_delete_row(table->record[1]);
|
||||
if (error && error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely(error) && error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
{
|
||||
table->file->print_error(error,MYF(0)); /* purecov: inspected */
|
||||
result= -1; /* purecov: inspected */
|
||||
|
@ -5028,7 +5033,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
else // new grant
|
||||
{
|
||||
GRANT_COLUMN *grant_column;
|
||||
if ((error=table->file->ha_write_row(table->record[0])))
|
||||
if (unlikely((error=table->file->ha_write_row(table->record[0]))))
|
||||
{
|
||||
table->file->print_error(error,MYF(0)); /* purecov: inspected */
|
||||
result= -1; /* purecov: inspected */
|
||||
|
@ -5083,8 +5088,9 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
if (privileges)
|
||||
{
|
||||
int tmp_error;
|
||||
if ((tmp_error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely(tmp_error=
|
||||
table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
tmp_error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
{ /* purecov: deadcode */
|
||||
table->file->print_error(tmp_error,MYF(0)); /* purecov: deadcode */
|
||||
|
@ -5100,7 +5106,8 @@ static int replace_column_table(GRANT_TABLE *g_t,
|
|||
else
|
||||
{
|
||||
int tmp_error;
|
||||
if ((tmp_error = table->file->ha_delete_row(table->record[1])))
|
||||
if (unlikely((tmp_error=
|
||||
table->file->ha_delete_row(table->record[1]))))
|
||||
{ /* purecov: deadcode */
|
||||
table->file->print_error(tmp_error,MYF(0)); /* purecov: deadcode */
|
||||
result= -1; /* purecov: deadcode */
|
||||
|
@ -5226,18 +5233,18 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
|
|||
{
|
||||
if (store_table_rights || store_col_rights)
|
||||
{
|
||||
if ((error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely(error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
goto table_error; /* purecov: deadcode */
|
||||
}
|
||||
else if ((error = table->file->ha_delete_row(table->record[1])))
|
||||
else if (unlikely((error = table->file->ha_delete_row(table->record[1]))))
|
||||
goto table_error; /* purecov: deadcode */
|
||||
}
|
||||
else
|
||||
{
|
||||
error=table->file->ha_write_row(table->record[0]);
|
||||
if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
|
||||
if (unlikely(table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)))
|
||||
goto table_error; /* purecov: deadcode */
|
||||
}
|
||||
|
||||
|
@ -5352,18 +5359,18 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
|
|||
{
|
||||
if (store_proc_rights)
|
||||
{
|
||||
if ((error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely(error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
goto table_error;
|
||||
}
|
||||
else if ((error= table->file->ha_delete_row(table->record[1])))
|
||||
else if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
|
||||
goto table_error;
|
||||
}
|
||||
else
|
||||
{
|
||||
error=table->file->ha_write_row(table->record[0]);
|
||||
if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
|
||||
if (unlikely(table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)))
|
||||
goto table_error;
|
||||
}
|
||||
|
||||
|
@ -6375,13 +6382,13 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
|
|||
column->column.ptr(), NULL, NULL,
|
||||
NULL, TRUE, FALSE,
|
||||
&unused_field_idx, FALSE, &dummy);
|
||||
if (f == (Field*)0)
|
||||
if (unlikely(f == (Field*)0))
|
||||
{
|
||||
my_error(ER_BAD_FIELD_ERROR, MYF(0),
|
||||
column->column.c_ptr(), table_list->alias.str);
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
if (f == (Field *)-1)
|
||||
if (unlikely(f == (Field *)-1))
|
||||
DBUG_RETURN(TRUE);
|
||||
column_priv|= column->rights;
|
||||
}
|
||||
|
@ -6464,7 +6471,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
|
|||
0, revoke_grant, create_new_users,
|
||||
MY_TEST(thd->variables.sql_mode &
|
||||
MODE_NO_AUTO_CREATE_USER));
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
result= TRUE; // Remember error
|
||||
continue; // Add next user
|
||||
|
@ -9244,8 +9251,8 @@ static int modify_grant_table(TABLE *table, Field *host_field,
|
|||
system_charset_info);
|
||||
user_field->store(user_to->user.str, user_to->user.length,
|
||||
system_charset_info);
|
||||
if ((error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely(error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
table->file->print_error(error, MYF(0));
|
||||
else
|
||||
|
@ -9254,7 +9261,7 @@ static int modify_grant_table(TABLE *table, Field *host_field,
|
|||
else
|
||||
{
|
||||
/* delete */
|
||||
if ((error=table->file->ha_delete_row(table->record[0])))
|
||||
if (unlikely((error=table->file->ha_delete_row(table->record[0]))))
|
||||
table->file->print_error(error, MYF(0));
|
||||
}
|
||||
|
||||
|
@ -9286,7 +9293,8 @@ static int handle_roles_mappings_table(TABLE *table, bool drop,
|
|||
DBUG_PRINT("info", ("Rewriting entry in roles_mapping table: %s@%s",
|
||||
user_from->user.str, user_from->host.str));
|
||||
table->use_all_columns();
|
||||
if (unlikely(error= table->file->ha_rnd_init_with_error(1)))
|
||||
|
||||
if (unlikely(table->file->ha_rnd_init_with_error(1)))
|
||||
result= -1;
|
||||
else
|
||||
{
|
||||
|
@ -9318,7 +9326,7 @@ static int handle_roles_mappings_table(TABLE *table, bool drop,
|
|||
|
||||
if (drop) /* drop if requested */
|
||||
{
|
||||
if ((error= table->file->ha_delete_row(table->record[0])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[0]))))
|
||||
table->file->print_error(error, MYF(0));
|
||||
}
|
||||
else if (user_to)
|
||||
|
@ -9326,8 +9334,8 @@ static int handle_roles_mappings_table(TABLE *table, bool drop,
|
|||
store_record(table, record[1]);
|
||||
role_field->store(user_to->user.str, user_to->user.length,
|
||||
system_charset_info);
|
||||
if ((error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely(error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
table->file->print_error(error, MYF(0));
|
||||
}
|
||||
|
@ -9418,13 +9426,14 @@ static int handle_grant_table(THD *thd, const Grant_table_base& grant_table,
|
|||
error= table->file->ha_index_read_idx_map(table->record[0], 0,
|
||||
user_key, (key_part_map)3,
|
||||
HA_READ_KEY_EXACT);
|
||||
if (!error && !*host_str)
|
||||
{ // verify that we got a role or a user, as needed
|
||||
if (!unlikely(error) && !*host_str)
|
||||
{
|
||||
// verify that we got a role or a user, as needed
|
||||
if (static_cast<const User_table&>(grant_table).check_is_role() !=
|
||||
user_from->is_role())
|
||||
error= HA_ERR_KEY_NOT_FOUND;
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
|
||||
{
|
||||
|
@ -9448,7 +9457,7 @@ static int handle_grant_table(THD *thd, const Grant_table_base& grant_table,
|
|||
And their host- and user fields are not consecutive.
|
||||
Thus, we need to do a table scan to find all matching records.
|
||||
*/
|
||||
if (unlikely(error= table->file->ha_rnd_init_with_error(1)))
|
||||
if (unlikely(table->file->ha_rnd_init_with_error(1)))
|
||||
result= -1;
|
||||
else
|
||||
{
|
||||
|
@ -12639,7 +12648,7 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
|
|||
|
||||
DBUG_PRINT("info", ("Reading user information over SSL layer"));
|
||||
pkt_len= my_net_read(net);
|
||||
if (pkt_len == packet_error || pkt_len < NORMAL_HANDSHAKE_SIZE)
|
||||
if (unlikely(pkt_len == packet_error || pkt_len < NORMAL_HANDSHAKE_SIZE))
|
||||
{
|
||||
DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)",
|
||||
pkt_len));
|
||||
|
@ -12728,8 +12737,9 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
|
|||
Since 4.1 all database names are stored in utf8
|
||||
The cast is ok as copy_with_error will create a new area for db
|
||||
*/
|
||||
if (thd->copy_with_error(system_charset_info, (LEX_STRING*) &mpvio->db,
|
||||
thd->charset(), db, db_len))
|
||||
if (unlikely(thd->copy_with_error(system_charset_info,
|
||||
(LEX_STRING*) &mpvio->db,
|
||||
thd->charset(), db, db_len)))
|
||||
return packet_error;
|
||||
|
||||
user_len= copy_and_convert(user_buff, sizeof(user_buff) - 1,
|
||||
|
@ -12966,7 +12976,7 @@ static int server_mpvio_read_packet(MYSQL_PLUGIN_VIO *param, uchar **buf)
|
|||
else
|
||||
pkt_len= my_net_read(&mpvio->auth_info.thd->net);
|
||||
|
||||
if (pkt_len == packet_error)
|
||||
if (unlikely(pkt_len == packet_error))
|
||||
goto err;
|
||||
|
||||
mpvio->packets_read++;
|
||||
|
@ -12978,7 +12988,7 @@ static int server_mpvio_read_packet(MYSQL_PLUGIN_VIO *param, uchar **buf)
|
|||
if (mpvio->packets_read == 1)
|
||||
{
|
||||
pkt_len= parse_client_handshake_packet(mpvio, buf, pkt_len);
|
||||
if (pkt_len == packet_error)
|
||||
if (unlikely(pkt_len == packet_error))
|
||||
goto err;
|
||||
}
|
||||
else
|
||||
|
|
|
@ -267,7 +267,7 @@ end:
|
|||
tdc_release_share(table->s);
|
||||
}
|
||||
/* In case of a temporary table there will be no metadata lock. */
|
||||
if (error && has_mdl_lock)
|
||||
if (unlikely(error) && has_mdl_lock)
|
||||
thd->mdl_context.release_transactional_locks();
|
||||
|
||||
DBUG_RETURN(error);
|
||||
|
@ -525,7 +525,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
|
|||
If open_and_lock_tables() failed, close_thread_tables() will close
|
||||
the table and table->table can therefore be invalid.
|
||||
*/
|
||||
if (open_error)
|
||||
if (unlikely(open_error))
|
||||
table->table= NULL;
|
||||
|
||||
/*
|
||||
|
@ -533,7 +533,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
|
|||
so any errors opening the table are logical errors.
|
||||
In these cases it does not make sense to try to repair.
|
||||
*/
|
||||
if (open_error && thd->locked_tables_mode)
|
||||
if (unlikely(open_error) && thd->locked_tables_mode)
|
||||
{
|
||||
result_code= HA_ADMIN_FAILED;
|
||||
goto send_result;
|
||||
|
@ -828,7 +828,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
|
|||
repair_table_use_frm, FALSE);
|
||||
thd->open_options&= ~extra_open_options;
|
||||
|
||||
if (!open_error)
|
||||
if (unlikely(!open_error))
|
||||
{
|
||||
TABLE *tab= table->table;
|
||||
Field **field_ptr= tab->field;
|
||||
|
|
|
@ -219,8 +219,11 @@ bool Sql_cmd_alter_table::execute(THD *thd)
|
|||
|
||||
DBUG_ENTER("Sql_cmd_alter_table::execute");
|
||||
|
||||
if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
{
|
||||
/* out of memory creating a copy of alter_info */
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
/*
|
||||
We also require DROP priv for ALTER TABLE ... DROP PARTITION, as well
|
||||
as for RENAME TO, as being done by SQLCOM_RENAME_TABLE
|
||||
|
|
|
@ -221,7 +221,7 @@ public:
|
|||
bool resize(size_t new_size, Elem default_val)
|
||||
{
|
||||
size_t old_size= elements();
|
||||
if (allocate_dynamic(&array, (uint)new_size))
|
||||
if (unlikely(allocate_dynamic(&array, (uint)new_size)))
|
||||
return true;
|
||||
|
||||
if (new_size > old_size)
|
||||
|
|
|
@ -1785,7 +1785,7 @@ retry_share:
|
|||
|
||||
share= tdc_acquire_share(thd, table_list, gts_flags, &table);
|
||||
|
||||
if (!share)
|
||||
if (unlikely(!share))
|
||||
{
|
||||
/*
|
||||
Hide "Table doesn't exist" errors if the table belongs to a view.
|
||||
|
@ -1927,7 +1927,7 @@ retry_share:
|
|||
thd->open_options, table, FALSE,
|
||||
IF_PARTITIONING(table_list->partition_names,0));
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
my_free(table);
|
||||
|
||||
|
@ -1972,7 +1972,7 @@ retry_share:
|
|||
table_list->table= table;
|
||||
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
if (table->part_info)
|
||||
if (unlikely(table->part_info))
|
||||
{
|
||||
/* Partitions specified were incorrect.*/
|
||||
if (part_names_error)
|
||||
|
@ -2057,7 +2057,7 @@ TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db,
|
|||
{
|
||||
TABLE *tab= find_locked_table(thd->open_tables, db, table_name);
|
||||
|
||||
if (!tab)
|
||||
if (unlikely(!tab))
|
||||
{
|
||||
if (!no_error)
|
||||
my_error(ER_TABLE_NOT_LOCKED, MYF(0), table_name);
|
||||
|
@ -2070,8 +2070,8 @@ TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db,
|
|||
cases don't take a global IX lock in order to be compatible with
|
||||
global read lock.
|
||||
*/
|
||||
if (!thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "",
|
||||
MDL_INTENTION_EXCLUSIVE))
|
||||
if (unlikely(!thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "",
|
||||
MDL_INTENTION_EXCLUSIVE)))
|
||||
{
|
||||
if (!no_error)
|
||||
my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), table_name);
|
||||
|
@ -2083,7 +2083,7 @@ TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db,
|
|||
(tab= find_locked_table(tab->next, db, table_name)))
|
||||
continue;
|
||||
|
||||
if (!tab && !no_error)
|
||||
if (unlikely(!tab && !no_error))
|
||||
my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), table_name);
|
||||
|
||||
return tab;
|
||||
|
@ -3529,7 +3529,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
|
|||
error= open_table(thd, tables, ot_ctx);
|
||||
}
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (! ot_ctx->can_recover_from_failed_open() && safe_to_ignore_table)
|
||||
{
|
||||
|
@ -3609,7 +3609,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
|
|||
if (need_prelocking && ! lex->requires_prelocking())
|
||||
lex->mark_as_requiring_prelocking(save_query_tables_last);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
@ -3619,7 +3619,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
|
|||
/* Check and update metadata version of a base table. */
|
||||
error= check_and_update_table_version(thd, tables, tables->table->s);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto end;
|
||||
/*
|
||||
After opening a MERGE table add the children to the query list of
|
||||
|
@ -3679,7 +3679,7 @@ process_view_routines:
|
|||
if (need_prelocking && ! lex->requires_prelocking())
|
||||
lex->mark_as_requiring_prelocking(save_query_tables_last);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
@ -4048,7 +4048,7 @@ restart:
|
|||
flags, prelocking_strategy,
|
||||
has_prelocking_list, &ot_ctx);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (ot_ctx.can_recover_from_failed_open())
|
||||
{
|
||||
|
@ -4130,7 +4130,7 @@ restart:
|
|||
if (need_prelocking && ! *start)
|
||||
*start= thd->lex->query_tables;
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (ot_ctx.can_recover_from_failed_open())
|
||||
{
|
||||
|
@ -4226,7 +4226,7 @@ error:
|
|||
THD_STAGE_INFO(thd, stage_after_opening_tables);
|
||||
thd_proc_info(thd, 0);
|
||||
|
||||
if (error && *table_to_open)
|
||||
if (unlikely(error) && *table_to_open)
|
||||
{
|
||||
(*table_to_open)->table= NULL;
|
||||
}
|
||||
|
@ -4404,7 +4404,7 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx,
|
|||
arena= thd->activate_stmt_arena_if_needed(&backup);
|
||||
|
||||
table->file->get_parent_foreign_key_list(thd, &fk_list);
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
if (arena)
|
||||
thd->restore_active_arena(arena, &backup);
|
||||
|
@ -4455,7 +4455,7 @@ handle_table(THD *thd, Query_tables_list *prelocking_ctx,
|
|||
table->internal_tables);
|
||||
if (arena)
|
||||
thd->restore_active_arena(arena, &backup);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
*need_prelocking= TRUE;
|
||||
return TRUE;
|
||||
|
@ -4696,7 +4696,7 @@ static bool check_lock_and_start_stmt(THD *thd,
|
|||
table_list->table->alias.c_ptr());
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
if ((error= table_list->table->file->start_stmt(thd, lock_type)))
|
||||
if (unlikely((error= table_list->table->file->start_stmt(thd, lock_type))))
|
||||
{
|
||||
table_list->table->file->print_error(error, MYF(0));
|
||||
DBUG_RETURN(1);
|
||||
|
@ -4836,7 +4836,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type,
|
|||
break;
|
||||
}
|
||||
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
/*
|
||||
We can't have a view or some special "open_strategy" in this function
|
||||
|
@ -6178,7 +6178,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
|
|||
if (db)
|
||||
return cur_field;
|
||||
|
||||
if (found)
|
||||
if (unlikely(found))
|
||||
{
|
||||
if (report_error == REPORT_ALL_ERRORS ||
|
||||
report_error == IGNORE_EXCEPT_NON_UNIQUE)
|
||||
|
@ -6190,7 +6190,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
|
|||
}
|
||||
}
|
||||
|
||||
if (found)
|
||||
if (likely(found))
|
||||
return found;
|
||||
|
||||
/*
|
||||
|
@@ -6309,7 +6309,7 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
(if this field created from expression argument of group_concat()),
=> we have to check presence of name before compare
*/
if (!item_field->name.str)
if (unlikely(!item_field->name.str))
continue;
if (table_name)

@@ -6427,24 +6427,27 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
}
}
}
if (!found)
if (likely(found))
return found;
if (unlikely(found_unaliased_non_uniq))
{
if (found_unaliased_non_uniq)
{
if (report_error != IGNORE_ERRORS)
my_error(ER_NON_UNIQ_ERROR, MYF(0),
find->full_name(), current_thd->where);
return (Item **) 0;
}
if (found_unaliased)
{
found= found_unaliased;
*counter= unaliased_counter;
*resolution= RESOLVED_BEHIND_ALIAS;
}
if (report_error != IGNORE_ERRORS)
my_error(ER_NON_UNIQ_ERROR, MYF(0),
find->full_name(), current_thd->where);
return (Item **) 0;
}
if (found_unaliased)
{
found= found_unaliased;
*counter= unaliased_counter;
*resolution= RESOLVED_BEHIND_ALIAS;
}
if (found)
return found;
if (report_error != REPORT_EXCEPT_NOT_FOUND)
{
if (report_error == REPORT_ALL_ERRORS)
@ -8187,7 +8190,7 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
|
|||
|
||||
if (rfield->stored_in_db())
|
||||
{
|
||||
if (value->save_in_field(rfield, 0) < 0 && !ignore_errors)
|
||||
if (unlikely(value->save_in_field(rfield, 0) < 0) && !ignore_errors)
|
||||
{
|
||||
my_message(ER_UNKNOWN_ERROR, ER_THD(thd, ER_UNKNOWN_ERROR), MYF(0));
|
||||
goto err;
|
||||
|
@ -8442,7 +8445,7 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
|
|||
/* Ensure that all fields are from the same table */
|
||||
DBUG_ASSERT(field->table == table);
|
||||
|
||||
if (field->invisible)
|
||||
if (unlikely(field->invisible))
|
||||
{
|
||||
all_fields_have_values= false;
|
||||
continue;
|
||||
|
@ -8454,7 +8457,7 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
|
|||
|
||||
if (field->field_index == autoinc_index)
|
||||
table->auto_increment_field_not_null= TRUE;
|
||||
if (field->vcol_info || (vers_sys_field && !ignore_errors))
|
||||
if (unlikely(field->vcol_info) || (vers_sys_field && !ignore_errors))
|
||||
{
|
||||
Item::Type type= value->type();
|
||||
if (type != Item::DEFAULT_VALUE_ITEM &&
|
||||
|
|
|
@ -1052,7 +1052,7 @@ void query_cache_insert(void *thd_arg, const char *packet, size_t length,
|
|||
called for this thread.
|
||||
*/
|
||||
|
||||
if (!thd)
|
||||
if (unlikely(!thd))
|
||||
return;
|
||||
|
||||
query_cache.insert(thd, &thd->query_cache_tls,
|
||||
|
|
|
@ -1174,8 +1174,8 @@ Sql_condition* THD::raise_condition(uint sql_errno,
|
|||
require memory allocation and therefore might fail. Non fatal out of
|
||||
memory errors can occur if raised by SIGNAL/RESIGNAL statement.
|
||||
*/
|
||||
if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY ||
|
||||
sql_errno == ER_OUTOFMEMORY)))
|
||||
if (likely(!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY ||
|
||||
sql_errno == ER_OUTOFMEMORY))))
|
||||
{
|
||||
cond= da->push_warning(this, sql_errno, sqlstate, level, ucid, msg);
|
||||
}
|
||||
|
@ -2378,12 +2378,12 @@ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs,
|
|||
DBUG_ENTER("THD::convert_string");
|
||||
size_t new_length= to_cs->mbmaxlen * from_length;
|
||||
uint errors;
|
||||
if (alloc_lex_string(to, new_length + 1))
|
||||
if (unlikely(alloc_lex_string(to, new_length + 1)))
|
||||
DBUG_RETURN(true); // EOM
|
||||
to->length= copy_and_convert((char*) to->str, new_length, to_cs,
|
||||
from, from_length, from_cs, &errors);
|
||||
to->str[to->length]= 0; // Safety
|
||||
if (errors && lex->parse_vcol_expr)
|
||||
if (unlikely(errors) && lex->parse_vcol_expr)
|
||||
{
|
||||
my_error(ER_BAD_DATA, MYF(0),
|
||||
ErrConvString(from, from_length, from_cs).ptr(),
|
||||
|
@ -2485,7 +2485,8 @@ bool THD::copy_with_error(CHARSET_INFO *dstcs, LEX_STRING *dst,
|
|||
bool THD::convert_string(String *s, CHARSET_INFO *from_cs, CHARSET_INFO *to_cs)
|
||||
{
|
||||
uint dummy_errors;
|
||||
if (convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs, &dummy_errors))
|
||||
if (unlikely(convert_buffer.copy(s->ptr(), s->length(), from_cs, to_cs,
|
||||
&dummy_errors)))
|
||||
return TRUE;
|
||||
/* If convert_buffer >> s copying is more efficient long term */
|
||||
if (convert_buffer.alloced_length() >= convert_buffer.length() * 2 ||
|
||||
|
@ -3063,7 +3064,7 @@ bool select_send::send_eof()
|
|||
Don't send EOF if we're in error condition (which implies we've already
|
||||
sent or are sending an error)
|
||||
*/
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
return TRUE;
|
||||
::my_eof(thd);
|
||||
is_result_set_started= 0;
|
||||
|
@ -3078,10 +3079,11 @@ bool select_send::send_eof()
|
|||
bool select_to_file::send_eof()
|
||||
{
|
||||
int error= MY_TEST(end_io_cache(&cache));
|
||||
if (mysql_file_close(file, MYF(MY_WME)) || thd->is_error())
|
||||
if (unlikely(mysql_file_close(file, MYF(MY_WME))) ||
|
||||
unlikely(thd->is_error()))
|
||||
error= true;
|
||||
|
||||
if (!error && !suppress_my_ok)
|
||||
if (likely(!error) && !suppress_my_ok)
|
||||
{
|
||||
::my_ok(thd,row_count);
|
||||
}
|
||||
|
@ -3343,7 +3345,7 @@ int select_export::send_data(List<Item> &items)
|
|||
res->charset(),
|
||||
res->ptr(), res->length());
|
||||
error_pos= copier.most_important_error_pos();
|
||||
if (error_pos)
|
||||
if (unlikely(error_pos))
|
||||
{
|
||||
char printable_buff[32];
|
||||
convert_to_printable(printable_buff, sizeof(printable_buff),
|
||||
|
@ -4179,7 +4181,7 @@ bool select_dumpvar::send_eof()
|
|||
Don't send EOF if we're in error condition (which implies we've already
|
||||
sent or are sending an error)
|
||||
*/
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
return true;
|
||||
|
||||
if (!suppress_my_ok)
|
||||
|
@@ -4315,9 +4317,8 @@ void thd_increment_bytes_sent(void *thd, size_t length)
}
}
my_bool thd_net_is_killed()
my_bool thd_net_is_killed(THD *thd)
{
THD *thd= current_thd;
return thd && thd->killed ? 1 : 0;
}
@ -4785,7 +4786,7 @@ TABLE *open_purge_table(THD *thd, const char *db, size_t dblen,
|
|||
/* we don't recover here */
|
||||
DBUG_ASSERT(!error || !ot_ctx.can_recover_from_failed_open());
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
close_thread_tables(thd);
|
||||
|
||||
DBUG_RETURN(error ? NULL : tl->table);
|
||||
|
@ -6407,7 +6408,8 @@ int THD::decide_logging_format(TABLE_LIST *tables)
|
|||
clear_binlog_local_stmt_filter();
|
||||
}
|
||||
|
||||
if (error) {
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info", ("decision: no logging since an error was generated"));
|
||||
DBUG_RETURN(-1);
|
||||
}
|
||||
|
@@ -7249,8 +7251,11 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
top-most close_thread_tables().
*/
if (this->locked_tables_mode <= LTM_LOCK_TABLES)
if (int error= binlog_flush_pending_rows_event(TRUE, is_trans))
{
int error;
if (unlikely(error= binlog_flush_pending_rows_event(TRUE, is_trans)))
DBUG_RETURN(error);
}
/*
Warnings for unsafe statements logged in statement format are
@ -7518,7 +7523,7 @@ wait_for_commit::wait_for_prior_commit2(THD *thd)
|
|||
thd->ENTER_COND(&COND_wait_commit, &LOCK_wait_commit,
|
||||
&stage_waiting_for_prior_transaction_to_commit,
|
||||
&old_stage);
|
||||
while ((loc_waitee= this->waitee) && !thd->check_killed())
|
||||
while ((loc_waitee= this->waitee) && likely(!thd->check_killed()))
|
||||
mysql_cond_wait(&COND_wait_commit, &LOCK_wait_commit);
|
||||
if (!loc_waitee)
|
||||
{
|
||||
|
|
|
@ -1013,7 +1013,7 @@ public:
|
|||
inline void* calloc(size_t size)
|
||||
{
|
||||
void *ptr;
|
||||
if ((ptr=alloc_root(mem_root,size)))
|
||||
if (likely((ptr=alloc_root(mem_root,size))))
|
||||
bzero(ptr, size);
|
||||
return ptr;
|
||||
}
|
||||
|
@ -1026,7 +1026,7 @@ public:
|
|||
inline void *memdup_w_gap(const void *str, size_t size, size_t gap)
|
||||
{
|
||||
void *ptr;
|
||||
if ((ptr= alloc_root(mem_root,size+gap)))
|
||||
if (likely((ptr= alloc_root(mem_root,size+gap))))
|
||||
memcpy(ptr,str,size);
|
||||
return ptr;
|
||||
}
|
||||
|
@ -3069,7 +3069,7 @@ public:
|
|||
/* See also thd_killed() */
|
||||
inline bool check_killed()
|
||||
{
|
||||
if (killed)
|
||||
if (unlikely(killed))
|
||||
return TRUE;
|
||||
if (apc_target.have_apc_requests())
|
||||
apc_target.process_apc_requests();
|
||||
|
@ -3683,8 +3683,9 @@ public:
|
|||
{
|
||||
LEX_CSTRING *lex_str;
|
||||
char *tmp;
|
||||
if (!(lex_str= (LEX_CSTRING *)alloc_root(mem_root, sizeof(LEX_CSTRING) +
|
||||
length+1)))
|
||||
if (unlikely(!(lex_str= (LEX_CSTRING *)alloc_root(mem_root,
|
||||
sizeof(LEX_CSTRING) +
|
||||
length+1))))
|
||||
return 0;
|
||||
tmp= (char*) (lex_str+1);
|
||||
lex_str->str= tmp;
|
||||
|
@ -3697,7 +3698,7 @@ public:
|
|||
// Allocate LEX_STRING for character set conversion
|
||||
bool alloc_lex_string(LEX_STRING *dst, size_t length)
|
||||
{
|
||||
if ((dst->str= (char*) alloc(length)))
|
||||
if (likely((dst->str= (char*) alloc(length))))
|
||||
return false;
|
||||
dst->length= 0; // Safety
|
||||
return true; // EOM
|
||||
|
@ -3965,7 +3966,7 @@ public:
|
|||
The worst things that can happen is that we get
|
||||
a suboptimal error message.
|
||||
*/
|
||||
if ((killed_err= (err_info*) alloc(sizeof(*killed_err))))
|
||||
if (likely((killed_err= (err_info*) alloc(sizeof(*killed_err)))))
|
||||
{
|
||||
killed_err->no= killed_errno_arg;
|
||||
::strmake((char*) killed_err->msg, killed_err_msg_arg,
|
||||
|
@ -6397,7 +6398,8 @@ public:
|
|||
char *tmp;
|
||||
/* format: [database + dot] + name + '\0' */
|
||||
dst->length= m_db.length + dot + m_name.length;
|
||||
if (!(dst->str= tmp= (char*) alloc_root(mem_root, dst->length + 1)))
|
||||
if (unlikely(!(dst->str= tmp= (char*) alloc_root(mem_root,
|
||||
dst->length + 1))))
|
||||
return true;
|
||||
sprintf(tmp, "%.*s%.*s%.*s",
|
||||
(int) m_db.length, (m_db.length ? m_db.str : ""),
|
||||
|
@ -6413,7 +6415,7 @@ public:
|
|||
{
|
||||
char *tmp;
|
||||
size_t length= package.length + 1 + routine.length + 1;
|
||||
if (!(tmp= (char *) alloc_root(mem_root, length)))
|
||||
if (unlikely(!(tmp= (char *) alloc_root(mem_root, length))))
|
||||
return true;
|
||||
m_name.length= my_snprintf(tmp, length, "%.*s.%.*s",
|
||||
(int) package.length, package.str,
|
||||
|
@ -6427,9 +6429,9 @@ public:
|
|||
const LEX_CSTRING &package,
|
||||
const LEX_CSTRING &routine)
|
||||
{
|
||||
if (make_package_routine_name(mem_root, package, routine))
|
||||
if (unlikely(make_package_routine_name(mem_root, package, routine)))
|
||||
return true;
|
||||
if (!(m_db.str= strmake_root(mem_root, db.str, db.length)))
|
||||
if (unlikely(!(m_db.str= strmake_root(mem_root, db.str, db.length))))
|
||||
return true;
|
||||
m_db.length= db.length;
|
||||
return false;
|
||||
|
|
|
@ -167,7 +167,7 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc)
|
|||
error= 0;
|
||||
|
||||
end:
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
uc->connections--; // no need for decrease_user_connections() here
|
||||
/*
|
||||
|
@ -178,7 +178,7 @@ end:
|
|||
thd->user_connect= NULL;
|
||||
}
|
||||
mysql_mutex_unlock(&LOCK_user_conn);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
inc_host_errors(thd->main_security_ctx.ip, &errors);
|
||||
}
|
||||
|
@ -1049,7 +1049,7 @@ static int check_connection(THD *thd)
|
|||
vio_keepalive(net->vio, TRUE);
|
||||
vio_set_keepalive_options(net->vio, &opt_vio_keepalive);
|
||||
|
||||
if (thd->packet.alloc(thd->variables.net_buffer_length))
|
||||
if (unlikely(thd->packet.alloc(thd->variables.net_buffer_length)))
|
||||
{
|
||||
/*
|
||||
Important note:
|
||||
|
@ -1139,7 +1139,7 @@ bool login_connection(THD *thd)
|
|||
error= check_connection(thd);
|
||||
thd->protocol->end_statement();
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{ // Wrong permissions
|
||||
#ifdef _WIN32
|
||||
if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE)
|
||||
|
@ -1206,13 +1206,13 @@ void end_connection(THD *thd)
|
|||
thd->user_connect= NULL;
|
||||
}
|
||||
|
||||
if (thd->killed || (net->error && net->vio != 0))
|
||||
if (unlikely(thd->killed) || (net->error && net->vio != 0))
|
||||
{
|
||||
statistic_increment(aborted_threads,&LOCK_status);
|
||||
status_var_increment(thd->status_var.lost_connections);
|
||||
}
|
||||
|
||||
if (!thd->killed && (net->error && net->vio != 0))
|
||||
if (likely(!thd->killed) && (net->error && net->vio != 0))
|
||||
thd->print_aborted_warning(1, thd->get_stmt_da()->is_error()
|
||||
? thd->get_stmt_da()->message() : ER_THD(thd, ER_UNKNOWN_ERROR));
|
||||
}
|
||||
|
@ -1241,7 +1241,7 @@ void prepare_new_connection_state(THD* thd)
|
|||
if (opt_init_connect.length && !(sctx->master_access & SUPER_ACL))
|
||||
{
|
||||
execute_init_command(thd, &opt_init_connect, &LOCK_sys_init_connect);
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
Host_errors errors;
|
||||
thd->set_killed(KILL_CONNECTION);
|
||||
|
@ -1330,9 +1330,9 @@ bool thd_prepare_connection(THD *thd)
|
|||
bool thd_is_connection_alive(THD *thd)
|
||||
{
|
||||
NET *net= &thd->net;
|
||||
if (!net->error &&
|
||||
net->vio != 0 &&
|
||||
thd->killed < KILL_CONNECTION)
|
||||
if (likely(!net->error &&
|
||||
net->vio != 0 &&
|
||||
thd->killed < KILL_CONNECTION))
|
||||
return TRUE;
|
||||
return FALSE;
|
||||
}
|
||||
|
@ -1510,7 +1510,7 @@ THD *CONNECT::create_thd(THD *thd)
|
|||
res= my_net_init(&thd->net, vio, thd, MYF(MY_THREAD_SPECIFIC));
|
||||
vio= 0; // Vio now handled by thd
|
||||
|
||||
if (res || thd->is_error())
|
||||
if (unlikely(res || thd->is_error()))
|
||||
{
|
||||
if (!thd_reused)
|
||||
delete thd;
|
||||
|
|
|
@ -293,7 +293,7 @@ static my_bool put_dbopt(const char *dbname, Schema_specification_st *create)
|
|||
strmov(opt->name, dbname);
|
||||
opt->name_length= length;
|
||||
|
||||
if ((error= my_hash_insert(&dboptions, (uchar*) opt)))
|
||||
if (unlikely((error= my_hash_insert(&dboptions, (uchar*) opt))))
|
||||
{
|
||||
my_free(opt);
|
||||
goto end;
|
||||
|
@ -724,7 +724,7 @@ mysql_alter_db_internal(THD *thd, const LEX_CSTRING *db,
|
|||
"table name to file name" encoding.
|
||||
*/
|
||||
build_table_filename(path, sizeof(path) - 1, db->str, "", MY_DB_OPT_FILE, 0);
|
||||
if ((error=write_db_opt(thd, path, create_info)))
|
||||
if (unlikely((error=write_db_opt(thd, path, create_info))))
|
||||
goto exit;
|
||||
|
||||
/* Change options if current database is being altered. */
|
||||
|
@ -754,7 +754,7 @@ mysql_alter_db_internal(THD *thd, const LEX_CSTRING *db,
|
|||
These DDL methods and logging are protected with the exclusive
|
||||
metadata lock on the schema.
|
||||
*/
|
||||
if ((error= mysql_bin_log.write(&qinfo)))
|
||||
if (unlikely((error= mysql_bin_log.write(&qinfo))))
|
||||
goto exit;
|
||||
}
|
||||
my_ok(thd, result);
|
||||
|
@ -938,7 +938,7 @@ mysql_rm_db_internal(THD *thd, const LEX_CSTRING *db, bool if_exists, bool silen
|
|||
thd->pop_internal_handler();
|
||||
|
||||
update_binlog:
|
||||
if (!silent && !error)
|
||||
if (!silent && likely(!error))
|
||||
{
|
||||
const char *query;
|
||||
ulong query_length;
|
||||
|
@ -1036,7 +1036,7 @@ exit:
|
|||
SELECT DATABASE() in the future). For this we free() thd->db and set
|
||||
it to 0.
|
||||
*/
|
||||
if (thd->db.str && cmp_db_names(&thd->db, db) && !error)
|
||||
if (unlikely(thd->db.str && cmp_db_names(&thd->db, db) && !error))
|
||||
{
|
||||
mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server);
|
||||
SESSION_TRACKER_CHANGED(thd, CURRENT_SCHEMA_TRACKER, NULL);
|
||||
|
@ -1179,9 +1179,9 @@ static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error)
|
|||
if (pos > path && pos[-1] == FN_LIBCHAR)
|
||||
*--pos=0;
|
||||
|
||||
if ((error= my_readlink(tmp2_path, path, MYF(MY_WME))) < 0)
|
||||
if (unlikely((error= my_readlink(tmp2_path, path, MYF(MY_WME))) < 0))
|
||||
DBUG_RETURN(1);
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
if (mysql_file_delete(key_file_misc, path, MYF(send_error ? MY_WME : 0)))
|
||||
{
|
||||
|
@ -1196,7 +1196,7 @@ static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error)
|
|||
|
||||
if (pos > path && pos[-1] == FN_LIBCHAR)
|
||||
*--pos=0;
|
||||
if (rmdir(path) < 0 && send_error)
|
||||
if (unlikely(rmdir(path) < 0 && send_error))
|
||||
{
|
||||
my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno);
|
||||
DBUG_RETURN(1);
|
||||
|
@ -1697,15 +1697,16 @@ bool mysql_upgrade_db(THD *thd, const LEX_CSTRING *old_db)
|
|||
length= build_table_filename(path, sizeof(path)-1, old_db->str, "", "", 0);
|
||||
if (length && path[length-1] == FN_LIBCHAR)
|
||||
path[length-1]=0; // remove ending '\'
|
||||
if ((error= my_access(path,F_OK)))
|
||||
if (unlikely((error= my_access(path,F_OK))))
|
||||
{
|
||||
my_error(ER_BAD_DB_ERROR, MYF(0), old_db->str);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* Step1: Create the new database */
|
||||
if ((error= mysql_create_db_internal(thd, &new_db,
|
||||
DDL_options(), &create_info, 1)))
|
||||
if (unlikely((error= mysql_create_db_internal(thd, &new_db,
|
||||
DDL_options(), &create_info,
|
||||
1))))
|
||||
goto exit;
|
||||
|
||||
/* Step2: Move tables to the new database */
|
||||
|
|
|
@ -401,7 +401,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
}
|
||||
|
||||
const_cond_result= const_cond && (!conds || conds->val_int());
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
/* Error evaluating val_int(). */
|
||||
DBUG_RETURN(TRUE);
|
||||
|
@ -439,7 +439,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
if (thd->lex->describe)
|
||||
goto produce_explain_and_leave;
|
||||
|
||||
if (!(error=table->file->ha_delete_all_rows()))
|
||||
if (likely(!(error=table->file->ha_delete_all_rows())))
|
||||
{
|
||||
/*
|
||||
If delete_all_rows() is used, it is not possible to log the
|
||||
|
@ -495,7 +495,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
table->quick_keys.clear_all(); // Can't use 'only index'
|
||||
|
||||
select=make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(TRUE);
|
||||
if ((select && select->check_quick(thd, safe_update, limit)) || !limit)
|
||||
{
|
||||
|
@ -511,7 +511,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
Currently they rely on the user checking DA for
|
||||
errors when unwinding the stack after calling Item::val_xxx().
|
||||
*/
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
DBUG_RETURN(TRUE);
|
||||
my_ok(thd, 0);
|
||||
DBUG_RETURN(0); // Nothing to delete
|
||||
|
@ -662,10 +662,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
else
|
||||
error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
|
||||
reverse);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto got_error;
|
||||
|
||||
if (init_ftfuncs(thd, select_lex, 1))
|
||||
if (unlikely(init_ftfuncs(thd, select_lex, 1)))
|
||||
goto got_error;
|
||||
|
||||
table->mark_columns_needed_for_delete();
|
||||
|
@ -676,9 +676,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
|
||||
if (with_select)
|
||||
{
|
||||
if (result->send_result_set_metadata(select_lex->item_list,
|
||||
Protocol::SEND_NUM_ROWS |
|
||||
Protocol::SEND_EOF))
|
||||
if (unlikely(result->send_result_set_metadata(select_lex->item_list,
|
||||
Protocol::SEND_NUM_ROWS |
|
||||
Protocol::SEND_EOF)))
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
|
@ -703,7 +703,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
if (record_should_be_deleted(thd, table, select, explain, truncate_history))
|
||||
{
|
||||
table->file->position(table->record[0]);
|
||||
if ((error= deltempfile->unique_add((char*) table->file->ref)))
|
||||
if (unlikely((error=
|
||||
deltempfile->unique_add((char*) table->file->ref))))
|
||||
{
|
||||
error= 1;
|
||||
goto terminate_delete;
|
||||
|
@ -713,8 +714,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
}
|
||||
}
|
||||
end_read_record(&info);
|
||||
if (deltempfile->get(table) || table->file->ha_index_or_rnd_end() ||
|
||||
init_read_record(&info, thd, table, 0, &deltempfile->sort, 0, 1, false))
|
||||
if (unlikely(deltempfile->get(table)) ||
|
||||
unlikely(table->file->ha_index_or_rnd_end()) ||
|
||||
unlikely(init_read_record(&info, thd, table, 0, &deltempfile->sort, 0,
|
||||
1, false)))
|
||||
{
|
||||
error= 1;
|
||||
goto terminate_delete;
|
||||
|
@ -723,8 +726,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
}
|
||||
|
||||
THD_STAGE_INFO(thd, stage_updating);
|
||||
while (!(error=info.read_record()) && !thd->killed &&
|
||||
! thd->is_error())
|
||||
while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
|
||||
likely(!thd->is_error()))
|
||||
{
|
||||
if (delete_while_scanning)
|
||||
delete_record= record_should_be_deleted(thd, table, select, explain,
|
||||
|
@ -746,7 +749,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
}
|
||||
|
||||
error= table->delete_row();
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
deleted++;
|
||||
if (!truncate_history && table->triggers &&
|
||||
|
@ -777,7 +780,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
Don't try unlocking the row if skip_record reported an error since in
|
||||
this case the transaction might have been rolled back already.
|
||||
*/
|
||||
else if (!thd->is_error())
|
||||
else if (likely(!thd->is_error()))
|
||||
table->file->unlock_row(); // Row failed selection, release lock on it
|
||||
else
|
||||
break;
|
||||
|
@ -785,9 +788,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
|
|||
|
||||
terminate_delete:
|
||||
killed_status= thd->killed;
|
||||
if (killed_status != NOT_KILLED || thd->is_error())
|
||||
if (unlikely(killed_status != NOT_KILLED || thd->is_error()))
|
||||
error= 1; // Aborted
|
||||
if (will_batch && (loc_error= table->file->end_bulk_delete()))
|
||||
if (will_batch && unlikely((loc_error= table->file->end_bulk_delete())))
|
||||
{
|
||||
if (error != 1)
|
||||
table->file->print_error(loc_error,MYF(0));
|
||||
|
@ -826,7 +829,7 @@ cleanup:
|
|||
thd->transaction.all.modified_non_trans_table= TRUE;
|
||||
|
||||
/* See similar binlogging code in sql_update.cc, for comments */
|
||||
if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
|
||||
if (likely((error < 0) || thd->transaction.stmt.modified_non_trans_table))
|
||||
{
|
||||
if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
|
||||
{
|
||||
|
@ -857,7 +860,7 @@ cleanup:
|
|||
}
|
||||
DBUG_ASSERT(transactional_table || !deleted || thd->transaction.stmt.modified_non_trans_table);
|
||||
|
||||
if (error < 0 ||
|
||||
if (likely(error < 0) ||
|
||||
(thd->lex->ignore && !thd->is_error() && !thd->is_fatal_error))
|
||||
{
|
||||
if (thd->lex->analyze_stmt)
|
||||
|
@ -1122,7 +1125,8 @@ multi_delete::initialize_tables(JOIN *join)
|
|||
Unique **tempfiles_ptr;
|
||||
DBUG_ENTER("initialize_tables");
|
||||
|
||||
if ((thd->variables.option_bits & OPTION_SAFE_UPDATES) && error_if_full_join(join))
|
||||
if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
|
||||
error_if_full_join(join)))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
table_map tables_to_delete_from=0;
|
||||
|
@ -1252,7 +1256,7 @@ int multi_delete::send_data(List<Item> &values)
|
|||
table->status|= STATUS_DELETED;
|
||||
|
||||
error= table->delete_row();
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
deleted++;
|
||||
if (!table->file->has_transactions())
|
||||
|
@ -1275,7 +1279,7 @@ int multi_delete::send_data(List<Item> &values)
|
|||
else
|
||||
{
|
||||
error=tempfiles[secure_counter]->unique_add((char*) table->file->ref);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
error= 1; // Fatal error
|
||||
DBUG_RETURN(1);
|
||||
|
@ -1371,19 +1375,19 @@ int multi_delete::do_deletes()
|
|||
{
|
||||
TABLE *table = table_being_deleted->table;
|
||||
int local_error;
|
||||
if (tempfiles[counter]->get(table))
|
||||
if (unlikely(tempfiles[counter]->get(table)))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
local_error= do_table_deletes(table, &tempfiles[counter]->sort,
|
||||
thd->lex->ignore);
|
||||
|
||||
if (thd->killed && !local_error)
|
||||
if (unlikely(thd->killed) && likely(!local_error))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
if (local_error == -1) // End of file
|
||||
local_error = 0;
|
||||
if (unlikely(local_error == -1)) // End of file
|
||||
local_error= 0;
|
||||
|
||||
if (local_error)
|
||||
if (unlikely(local_error))
|
||||
DBUG_RETURN(local_error);
|
||||
}
|
||||
DBUG_RETURN(0);
|
||||
|
@ -1413,7 +1417,8 @@ int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info,
|
|||
ha_rows last_deleted= deleted;
|
||||
DBUG_ENTER("do_deletes_for_table");
|
||||
|
||||
if (init_read_record(&info, thd, table, NULL, sort_info, 0, 1, FALSE))
|
||||
if (unlikely(init_read_record(&info, thd, table, NULL, sort_info, 0, 1,
|
||||
FALSE)))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
/*
|
||||
|
@ -1422,18 +1427,18 @@ int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info,
|
|||
*/
|
||||
info.ignore_not_found_rows= 1;
|
||||
bool will_batch= !table->file->start_bulk_delete();
|
||||
while (!(local_error= info.read_record()) && !thd->killed)
|
||||
while (likely(!(local_error= info.read_record())) && likely(!thd->killed))
|
||||
{
|
||||
if (table->triggers &&
|
||||
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
|
||||
TRG_ACTION_BEFORE, FALSE))
|
||||
unlikely(table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
|
||||
TRG_ACTION_BEFORE, FALSE)))
|
||||
{
|
||||
local_error= 1;
|
||||
break;
|
||||
}
|
||||
|
||||
local_error= table->delete_row();
|
||||
if (local_error && !ignore)
|
||||
if (unlikely(local_error) && !ignore)
|
||||
{
|
||||
table->file->print_error(local_error, MYF(0));
|
||||
break;
|
||||
|
@ -1444,7 +1449,7 @@ int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info,
|
|||
during ha_delete_row.
|
||||
Also, don't execute the AFTER trigger if the row operation failed.
|
||||
*/
|
||||
if (!local_error)
|
||||
if (unlikely(!local_error))
|
||||
{
|
||||
deleted++;
|
||||
if (table->triggers &&
|
||||
|
@ -1459,7 +1464,7 @@ int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info,
|
|||
if (will_batch)
|
||||
{
|
||||
int tmp_error= table->file->end_bulk_delete();
|
||||
if (tmp_error && !local_error)
|
||||
if (unlikely(tmp_error) && !local_error)
|
||||
{
|
||||
local_error= tmp_error;
|
||||
table->file->print_error(local_error, MYF(0));
|
||||
|
@ -1507,28 +1512,30 @@ bool multi_delete::send_eof()
|
|||
{
|
||||
query_cache_invalidate3(thd, delete_tables, 1);
|
||||
}
|
||||
if ((local_error == 0) || thd->transaction.stmt.modified_non_trans_table)
|
||||
if (likely((local_error == 0) ||
|
||||
thd->transaction.stmt.modified_non_trans_table))
|
||||
{
|
||||
if(WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
|
||||
{
|
||||
int errcode= 0;
|
||||
if (local_error == 0)
|
||||
if (likely(local_error == 0))
|
||||
thd->clear_error();
|
||||
else
|
||||
errcode= query_error_code(thd, killed_status == NOT_KILLED);
|
||||
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
|
||||
thd->query(), thd->query_length(),
|
||||
transactional_tables, FALSE, FALSE, errcode) &&
|
||||
if (unlikely(thd->binlog_query(THD::ROW_QUERY_TYPE,
|
||||
thd->query(), thd->query_length(),
|
||||
transactional_tables, FALSE, FALSE,
|
||||
errcode)) &&
|
||||
!normal_tables)
|
||||
{
|
||||
local_error=1; // Log write failed: roll back the SQL statement
|
||||
}
|
||||
}
|
||||
}
|
||||
if (local_error != 0)
|
||||
if (unlikely(local_error != 0))
|
||||
error_handled= TRUE; // to force early leave from ::abort_result_set()
|
||||
|
||||
if (!local_error && !thd->lex->analyze_stmt)
|
||||
if (likely(!local_error && !thd->lex->analyze_stmt))
|
||||
{
|
||||
::my_ok(thd, deleted);
|
||||
}
|
||||
|
|
|
@ -674,7 +674,8 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
|
|||
table reference from a subquery for this.
|
||||
*/
|
||||
DBUG_ASSERT(derived->with->get_sq_rec_ref());
|
||||
if (mysql_derived_prepare(lex->thd, lex, derived->with->get_sq_rec_ref()))
|
||||
if (unlikely(mysql_derived_prepare(lex->thd, lex,
|
||||
derived->with->get_sq_rec_ref())))
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
|
@ -698,7 +699,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
|
|||
&derived->alias, FALSE, FALSE, FALSE, 0);
|
||||
thd->create_tmp_table_for_derived= FALSE;
|
||||
|
||||
if (!res && !derived->table)
|
||||
if (likely(!res) && !derived->table)
|
||||
{
|
||||
derived->derived_result->set_unit(unit);
|
||||
derived->table= derived->derived_result->table;
|
||||
|
|
|
@ -35,7 +35,7 @@ bool mysql_do(THD *thd, List<Item> &values)
|
|||
(void) value->is_null();
|
||||
free_underlaid_joins(thd, &thd->lex->select_lex);
|
||||
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
/*
|
||||
Rollback the effect of the statement, since next instruction
|
||||
|
|
|
@ -337,7 +337,7 @@ Diagnostics_area::set_ok_status(ulonglong affected_rows,
|
|||
In production, refuse to overwrite an error or a custom response
|
||||
with an OK packet.
|
||||
*/
|
||||
if (is_error() || is_disabled())
|
||||
if (unlikely(is_error() || is_disabled()))
|
||||
return;
|
||||
/*
|
||||
When running a bulk operation, m_status will be DA_OK for the first
|
||||
|
@ -377,7 +377,7 @@ Diagnostics_area::set_eof_status(THD *thd)
|
|||
In production, refuse to overwrite an error or a custom response
|
||||
with an EOF packet.
|
||||
*/
|
||||
if (is_error() || is_disabled())
|
||||
if (unlikely(is_error() || is_disabled()))
|
||||
return;
|
||||
|
||||
/*
|
||||
|
|
|
@ -270,10 +270,11 @@ my_bool Expression_cache_tmptable::put_value(Item *value)
|
|||
|
||||
*(items.head_ref())= value;
|
||||
fill_record(table_thd, cache_table, cache_table->field, items, TRUE, TRUE);
|
||||
if (table_thd->is_error())
|
||||
if (unlikely(table_thd->is_error()))
|
||||
goto err;;
|
||||
|
||||
if ((error= cache_table->file->ha_write_tmp_row(cache_table->record[0])))
|
||||
if (unlikely((error=
|
||||
cache_table->file->ha_write_tmp_row(cache_table->record[0]))))
|
||||
{
|
||||
/* create_myisam_from_heap will generate error if needed */
|
||||
if (cache_table->file->is_fatal_error(error, HA_CHECK_DUP))
|
||||
|
|
|
@ -69,7 +69,7 @@ Sql_cmd_get_diagnostics::execute(THD *thd)
|
|||
const char *sqlstate= new_stmt_da.get_sqlstate();
|
||||
|
||||
/* In case of a fatal error, set it into the original DA.*/
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
{
|
||||
save_stmt_da->set_error_status(sql_errno, message, sqlstate, NULL);
|
||||
DBUG_RETURN(true);
|
||||
|
@ -81,7 +81,7 @@ Sql_cmd_get_diagnostics::execute(THD *thd)
|
|||
message);
|
||||
|
||||
/* Appending might have failed. */
|
||||
if (! (rv= thd->is_error()))
|
||||
if (unlikely(!(rv= thd->is_error())))
|
||||
thd->get_stmt_da()->set_ok_status(0, 0, NULL);
|
||||
|
||||
DBUG_RETURN(rv);
|
||||
|
|
|
@ -345,7 +345,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
|
|||
error= (thd->open_temporary_tables(tables) ||
|
||||
open_tables(thd, &tables, &counter, 0));
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
|
||||
table= tables->table;
|
||||
|
@ -371,7 +371,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
|
|||
/* The ticket returned is within a savepoint. Make a copy. */
|
||||
error= thd->mdl_context.clone_ticket(&table_list->mdl_request);
|
||||
table_list->table->mdl_ticket= table_list->mdl_request.ticket;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
@ -426,8 +426,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
|
|||
thd->set_n_backup_active_arena(&sql_handler->arena, &backup_arena);
|
||||
error= table->fill_item_list(&sql_handler->fields);
|
||||
thd->restore_active_arena(&sql_handler->arena, &backup_arena);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
|
||||
/* Always read all columns */
|
||||
|
@ -838,7 +837,7 @@ retry:
|
|||
goto retry;
|
||||
}
|
||||
|
||||
if (lock_error)
|
||||
if (unlikely(lock_error))
|
||||
goto err0; // mysql_lock_tables() printed error message already
|
||||
}
|
||||
|
||||
|
@ -880,14 +879,14 @@ retry:
|
|||
case RFIRST:
|
||||
if (keyname)
|
||||
{
|
||||
if (!(error= table->file->ha_index_or_rnd_end()) &&
|
||||
!(error= table->file->ha_index_init(keyno, 1)))
|
||||
if (likely(!(error= table->file->ha_index_or_rnd_end())) &&
|
||||
likely(!(error= table->file->ha_index_init(keyno, 1))))
|
||||
error= table->file->ha_index_first(table->record[0]);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!(error= table->file->ha_index_or_rnd_end()) &&
|
||||
!(error= table->file->ha_rnd_init(1)))
|
||||
if (likely(!(error= table->file->ha_index_or_rnd_end())) &&
|
||||
likely(!(error= table->file->ha_rnd_init(1))))
|
||||
error= table->file->ha_rnd_next(table->record[0]);
|
||||
}
|
||||
mode= RNEXT;
|
||||
|
@ -906,8 +905,8 @@ retry:
|
|||
/* else fall through */
|
||||
case RLAST:
|
||||
DBUG_ASSERT(keyname != 0);
|
||||
if (!(error= table->file->ha_index_or_rnd_end()) &&
|
||||
!(error= table->file->ha_index_init(keyno, 1)))
|
||||
if (likely(!(error= table->file->ha_index_or_rnd_end())) &&
|
||||
likely(!(error= table->file->ha_index_init(keyno, 1))))
|
||||
error= table->file->ha_index_last(table->record[0]);
|
||||
mode=RPREV;
|
||||
break;
|
||||
|
@ -921,13 +920,13 @@ retry:
|
|||
{
|
||||
DBUG_ASSERT(keyname != 0);
|
||||
|
||||
if (!(key= (uchar*) thd->calloc(ALIGN_SIZE(handler->key_len))))
|
||||
if (unlikely(!(key= (uchar*) thd->calloc(ALIGN_SIZE(handler->key_len)))))
|
||||
goto err;
|
||||
if ((error= table->file->ha_index_or_rnd_end()))
|
||||
if (unlikely((error= table->file->ha_index_or_rnd_end())))
|
||||
break;
|
||||
key_copy(key, table->record[0], table->key_info + keyno,
|
||||
handler->key_len);
|
||||
if (!(error= table->file->ha_index_init(keyno, 1)))
|
||||
if (unlikely(!(error= table->file->ha_index_init(keyno, 1))))
|
||||
error= table->file->ha_index_read_map(table->record[0],
|
||||
key, handler->keypart_map,
|
||||
ha_rkey_mode);
|
||||
|
@ -940,7 +939,7 @@ retry:
|
|||
goto err;
|
||||
}
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
continue;
|
||||
|
|
|
@ -618,8 +618,9 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond,
|
|||
table->covering_keys.clear_all();
|
||||
|
||||
SQL_SELECT *res= make_select(table, 0, 0, cond, 0, 0, error);
|
||||
if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) ||
|
||||
(res && res->quick && res->quick->reset()))
|
||||
if (unlikely(*error) ||
|
||||
(likely(res) && unlikely(res->check_quick(thd, 0, HA_POS_ERROR))) ||
|
||||
(likely(res) && res->quick && unlikely(res->quick->reset())))
|
||||
{
|
||||
delete res;
|
||||
res=0;
|
||||
|
@ -658,7 +659,7 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, size_t mlen,
|
|||
pfname->charset()),
|
||||
new (mem_root) Item_string_ascii(thd, "\\"),
|
||||
FALSE);
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
return 0; // OOM
|
||||
return prepare_simple_select(thd, cond, table, error);
|
||||
}
|
||||
|
|
|
@ -963,8 +963,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
|
|||
*/
|
||||
restore_record(table,s->default_values); // Get empty record
|
||||
table->reset_default_fields();
|
||||
if (fill_record_n_invoke_before_triggers(thd, table, fields, *values, 0,
|
||||
TRG_EVENT_INSERT))
|
||||
if (unlikely(fill_record_n_invoke_before_triggers(thd, table, fields,
|
||||
*values, 0,
|
||||
TRG_EVENT_INSERT)))
|
||||
{
|
||||
if (values_list.elements != 1 && ! thd->is_error())
|
||||
{
|
||||
|
@ -987,7 +988,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
|
|||
INSERT INTO t1 VALUES (values)
|
||||
*/
|
||||
if (thd->lex->used_tables || // Column used in values()
|
||||
table->s->visible_fields != table->s->fields)
|
||||
table->s->visible_fields != table->s->fields)
|
||||
restore_record(table,s->default_values); // Get empty record
|
||||
else
|
||||
{
|
||||
|
@ -1008,9 +1009,11 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
|
|||
}
|
||||
}
|
||||
table->reset_default_fields();
|
||||
if (fill_record_n_invoke_before_triggers(thd, table,
|
||||
table->field_to_fill(),
|
||||
*values, 0, TRG_EVENT_INSERT))
|
||||
if (unlikely(fill_record_n_invoke_before_triggers(thd, table,
|
||||
table->
|
||||
field_to_fill(),
|
||||
*values, 0,
|
||||
TRG_EVENT_INSERT)))
|
||||
{
|
||||
if (values_list.elements != 1 && ! thd->is_error())
|
||||
{
|
||||
|
@ -1023,16 +1026,16 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
|
|||
}
|
||||
|
||||
/*
|
||||
with triggers a field can get a value *conditionally*, so we have to repeat
|
||||
has_no_default_value() check for every row
|
||||
with triggers a field can get a value *conditionally*, so we have to
|
||||
repeat has_no_default_value() check for every row
|
||||
*/
|
||||
if (table->triggers &&
|
||||
table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE))
|
||||
{
|
||||
for (Field **f=table->field ; *f ; f++)
|
||||
{
|
||||
if (!(*f)->has_explicit_value() &&
|
||||
has_no_default_value(thd, *f, table_list))
|
||||
if (unlikely(!(*f)->has_explicit_value() &&
|
||||
has_no_default_value(thd, *f, table_list)))
|
||||
{
|
||||
error= 1;
|
||||
goto values_loop_end;
|
||||
|
@ -1064,7 +1067,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
|
|||
else
|
||||
#endif
|
||||
error=write_record(thd, table ,&info);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
break;
|
||||
thd->get_stmt_da()->inc_current_row_for_warning();
|
||||
}
|
||||
|
@ -1081,9 +1084,9 @@ values_loop_end:
|
|||
user
|
||||
*/
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
if (lock_type == TL_WRITE_DELAYED)
|
||||
if (unlikely(lock_type == TL_WRITE_DELAYED))
|
||||
{
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
info.copied=values_list.elements;
|
||||
end_delayed_insert(thd);
|
||||
|
@ -1097,7 +1100,8 @@ values_loop_end:
|
|||
auto_inc values from the delayed_insert thread as they share TABLE.
|
||||
*/
|
||||
table->file->ha_release_auto_increment();
|
||||
if (using_bulk_insert && table->file->ha_end_bulk_insert() && !error)
|
||||
if (using_bulk_insert && unlikely(table->file->ha_end_bulk_insert()) &&
|
||||
!error)
|
||||
{
|
||||
table->file->print_error(my_errno,MYF(0));
|
||||
error=1;
|
||||
|
@ -1107,7 +1111,7 @@ values_loop_end:
|
|||
|
||||
transactional_table= table->file->has_transactions();
|
||||
|
||||
if ((changed= (info.copied || info.deleted || info.updated)))
|
||||
if (likely(changed= (info.copied || info.deleted || info.updated)))
|
||||
{
|
||||
/*
|
||||
Invalidate the table in the query cache if something changed.
|
||||
|
@ -1212,7 +1216,7 @@ values_loop_end:
|
|||
(!table->triggers || !table->triggers->has_delete_triggers()))
|
||||
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto abort;
|
||||
if (thd->lex->analyze_stmt)
|
||||
{
|
||||
|
@ -1692,7 +1696,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
|
|||
if (info->handle_duplicates == DUP_REPLACE ||
|
||||
info->handle_duplicates == DUP_UPDATE)
|
||||
{
|
||||
while ((error=table->file->ha_write_row(table->record[0])))
|
||||
while (unlikely(error=table->file->ha_write_row(table->record[0])))
|
||||
{
|
||||
uint key_nr;
|
||||
/*
|
||||
|
@ -1725,7 +1729,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
|
|||
}
|
||||
goto err;
|
||||
}
|
||||
if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
|
||||
if (unlikely((int) (key_nr = table->file->get_dup_key(error)) < 0))
|
||||
{
|
||||
error= HA_ERR_FOUND_DUPP_KEY; /* Database can't find key */
|
||||
goto err;
|
||||
|
@ -1836,8 +1840,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
|
|||
info->touched++;
|
||||
if (different_records)
|
||||
{
|
||||
if ((error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely(error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
{
|
||||
if (info->ignore &&
|
||||
|
@ -1935,11 +1939,11 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
|
|||
bitmap_set_bit(table->write_set, table->vers_start_field()->field_index);
|
||||
table->vers_start_field()->store(0, false);
|
||||
}
|
||||
if ((error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely(error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
goto err;
|
||||
if (error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (likely(!error))
|
||||
{
|
||||
info->deleted++;
|
||||
if (table->versioned(VERS_TIMESTAMP))
|
||||
|
@ -1947,12 +1951,12 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
|
|||
store_record(table, record[2]);
|
||||
error= vers_insert_history_row(table);
|
||||
restore_record(table, record[2]);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
else
|
||||
error= 0;
|
||||
error= 0; // error was HA_ERR_RECORD_IS_THE_SAME
|
||||
thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
|
||||
/*
|
||||
Since we pretend that we have done insert we should call
|
||||
|
@ -1979,7 +1983,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
|
|||
table->record[0]);
|
||||
restore_record(table,insert_values);
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
if (!table->versioned(VERS_TIMESTAMP))
|
||||
info->deleted++;
|
||||
|
@ -2019,7 +2023,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
|
|||
table->write_set != save_write_set)
|
||||
table->column_bitmaps_set(save_read_set, save_write_set);
|
||||
}
|
||||
else if ((error=table->file->ha_write_row(table->record[0])))
|
||||
else if (unlikely((error=table->file->ha_write_row(table->record[0]))))
|
||||
{
|
||||
DEBUG_SYNC(thd, "write_row_noreplace");
|
||||
if (!info->ignore ||
|
||||
|
@ -2606,11 +2610,12 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
|
|||
share->default_fields)
|
||||
{
|
||||
bool error_reported= FALSE;
|
||||
if (!(copy->def_vcol_set= (MY_BITMAP*) alloc_root(client_thd->mem_root,
|
||||
sizeof(MY_BITMAP))))
|
||||
if (unlikely(!(copy->def_vcol_set=
|
||||
(MY_BITMAP*) alloc_root(client_thd->mem_root,
|
||||
sizeof(MY_BITMAP)))))
|
||||
goto error;
|
||||
|
||||
if (parse_vcol_defs(client_thd, client_thd->mem_root, copy, &error_reported))
|
||||
if (unlikely(parse_vcol_defs(client_thd, client_thd->mem_root, copy,
|
||||
&error_reported)))
|
||||
goto error;
|
||||
}
|
||||
|
||||
|
@ -3362,7 +3367,7 @@ bool Delayed_insert::handle_inserts(void)
|
|||
thd.clear_error(); // reset error for binlog
|
||||
|
||||
tmp_error= 0;
|
||||
if (table->vfield)
|
||||
if (unlikely(table->vfield))
|
||||
{
|
||||
/*
|
||||
Virtual fields where not calculated by caller as the temporary
|
||||
|
@ -3373,7 +3378,7 @@ bool Delayed_insert::handle_inserts(void)
|
|||
VCOL_UPDATE_FOR_WRITE);
|
||||
}
|
||||
|
||||
if (tmp_error || write_record(&thd, table, &info))
|
||||
if (unlikely(tmp_error) || unlikely(write_record(&thd, table, &info)))
|
||||
{
|
||||
info.error_count++; // Ignore errors
|
||||
thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status);
|
||||
|
@ -3420,7 +3425,7 @@ bool Delayed_insert::handle_inserts(void)
|
|||
mysql_cond_broadcast(&cond_client); // If waiting clients
|
||||
THD_STAGE_INFO(&thd, stage_reschedule);
|
||||
mysql_mutex_unlock(&mutex);
|
||||
if ((error=table->file->extra(HA_EXTRA_NO_CACHE)))
|
||||
if (unlikely((error=table->file->extra(HA_EXTRA_NO_CACHE))))
|
||||
{
|
||||
/* This should never happen */
|
||||
table->file->print_error(error,MYF(0));
|
||||
|
@ -3472,7 +3477,7 @@ bool Delayed_insert::handle_inserts(void)
|
|||
thd.binlog_flush_pending_rows_event(TRUE, has_trans))
|
||||
goto err;
|
||||
|
||||
if ((error=table->file->extra(HA_EXTRA_NO_CACHE)))
|
||||
if (unlikely((error=table->file->extra(HA_EXTRA_NO_CACHE))))
|
||||
{ // This shouldn't happen
|
||||
table->file->print_error(error,MYF(0));
|
||||
sql_print_error("%s", thd.get_stmt_da()->message());
|
||||
|
@ -3825,15 +3830,16 @@ int select_insert::send_data(List<Item> &values)
|
|||
unit->offset_limit_cnt--;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
if (thd->killed == ABORT_QUERY)
|
||||
if (unlikely(thd->killed == ABORT_QUERY))
|
||||
DBUG_RETURN(0);
|
||||
|
||||
thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields
|
||||
store_values(values);
|
||||
if (table->default_field && table->update_default_fields(0, info.ignore))
|
||||
if (table->default_field &&
|
||||
unlikely(table->update_default_fields(0, info.ignore)))
|
||||
DBUG_RETURN(1);
|
||||
thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
table->auto_increment_field_not_null= FALSE;
|
||||
DBUG_RETURN(1);
|
||||
|
@ -3854,7 +3860,7 @@ int select_insert::send_data(List<Item> &values)
|
|||
table->vers_write= table->versioned();
|
||||
table->auto_increment_field_not_null= FALSE;
|
||||
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
if (table->triggers || info.handle_duplicates == DUP_UPDATE)
|
||||
{
|
||||
|
@ -3913,18 +3919,18 @@ bool select_insert::prepare_eof()
|
|||
DBUG_PRINT("enter", ("trans_table=%d, table_type='%s'",
|
||||
trans_table, table->file->table_type()));
|
||||
|
||||
error = IF_WSREP((thd->wsrep_conflict_state == MUST_ABORT ||
|
||||
thd->wsrep_conflict_state == CERT_FAILURE) ? -1 :, )
|
||||
(thd->locked_tables_mode <= LTM_LOCK_TABLES ?
|
||||
table->file->ha_end_bulk_insert() : 0);
|
||||
error= (IF_WSREP((thd->wsrep_conflict_state == MUST_ABORT ||
|
||||
thd->wsrep_conflict_state == CERT_FAILURE) ? -1 :, )
|
||||
(thd->locked_tables_mode <= LTM_LOCK_TABLES ?
|
||||
table->file->ha_end_bulk_insert() : 0));
|
||||
|
||||
if (!error && thd->is_error())
|
||||
if (likely(!error) && unlikely(thd->is_error()))
|
||||
error= thd->get_stmt_da()->sql_errno();
|
||||
|
||||
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
|
||||
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
|
||||
|
||||
if ((changed= (info.copied || info.deleted || info.updated)))
|
||||
if (likely((changed= (info.copied || info.deleted || info.updated))))
|
||||
{
|
||||
/*
|
||||
We must invalidate the table in the query cache before binlog writing
|
||||
|
@ -3948,10 +3954,10 @@ bool select_insert::prepare_eof()
|
|||
ha_autocommit_or_rollback() is issued below.
|
||||
*/
|
||||
if ((WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) &&
|
||||
(!error || thd->transaction.stmt.modified_non_trans_table))
|
||||
(likely(!error) || thd->transaction.stmt.modified_non_trans_table))
|
||||
{
|
||||
int errcode= 0;
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
thd->clear_error();
|
||||
else
|
||||
errcode= query_error_code(thd, killed_status == NOT_KILLED);
|
||||
|
@ -3965,7 +3971,7 @@ bool select_insert::prepare_eof()
|
|||
}
|
||||
table->file->ha_release_auto_increment();
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
table->file->print_error(error,MYF(0));
|
||||
DBUG_RETURN(true);
|
||||
|
@ -4277,9 +4283,9 @@ TABLE *select_create::create_table_from_items(THD *thd,
|
|||
else
|
||||
create_table->table= 0; // Create failed
|
||||
|
||||
if (!(table= create_table->table))
|
||||
if (unlikely(!(table= create_table->table)))
|
||||
{
|
||||
if (!thd->is_error()) // CREATE ... IF NOT EXISTS
|
||||
if (likely(!thd->is_error())) // CREATE ... IF NOT EXISTS
|
||||
my_ok(thd); // succeed, but did nothing
|
||||
DBUG_RETURN(NULL);
|
||||
}
|
||||
|
@ -4293,8 +4299,8 @@ TABLE *select_create::create_table_from_items(THD *thd,
|
|||
since it won't wait for the table lock (we have exclusive metadata lock on
|
||||
the table) and thus can't get aborted.
|
||||
*/
|
||||
if (! ((*lock)= mysql_lock_tables(thd, &table, 1, 0)) ||
|
||||
hooks->postlock(&table, 1))
|
||||
if (unlikely(!((*lock)= mysql_lock_tables(thd, &table, 1, 0)) ||
|
||||
hooks->postlock(&table, 1)))
|
||||
{
|
||||
/* purecov: begin tested */
|
||||
/*
|
||||
|
@ -4364,14 +4370,15 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
|
|||
|
||||
create_table->next_global= save_next_global;
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
return error;
|
||||
|
||||
TABLE const *const table = *tables;
|
||||
if (thd->is_current_stmt_binlog_format_row() &&
|
||||
!table->s->tmp_table)
|
||||
{
|
||||
if (int error= ptr->binlog_show_create_table(tables, count))
|
||||
int error;
|
||||
if (unlikely((error= ptr->binlog_show_create_table(tables, count))))
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -2248,7 +2248,7 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
|
|||
goto finish2;
|
||||
|
||||
/* Prepare to retrieve all records of the joined table */
|
||||
if ((error= join_tab_scan->open()))
|
||||
if (unlikely((error= join_tab_scan->open())))
|
||||
{
|
||||
/*
|
||||
TODO: if we get here, we will assert in net_send_statement(). Add test
|
||||
|
@ -2259,7 +2259,7 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
|
|||
|
||||
while (!(error= join_tab_scan->next()))
|
||||
{
|
||||
if (join->thd->check_killed())
|
||||
if (unlikely(join->thd->check_killed()))
|
||||
{
|
||||
/* The user has aborted the execution of the query */
|
||||
join->thd->send_kill_message();
|
||||
|
@ -2411,7 +2411,7 @@ enum_nested_loop_state JOIN_CACHE::generate_full_extensions(uchar *rec_ptr)
|
|||
DBUG_RETURN(rc);
|
||||
}
|
||||
}
|
||||
else if (join->thd->is_error())
|
||||
else if (unlikely(join->thd->is_error()))
|
||||
rc= NESTED_LOOP_ERROR;
|
||||
DBUG_RETURN(rc);
|
||||
}
|
||||
|
@ -2533,7 +2533,7 @@ enum_nested_loop_state JOIN_CACHE::join_null_complements(bool skip_last)
|
|||
|
||||
for ( ; cnt; cnt--)
|
||||
{
|
||||
if (join->thd->check_killed())
|
||||
if (unlikely(join->thd->check_killed()))
|
||||
{
|
||||
/* The user has aborted the execution of the query */
|
||||
join->thd->send_kill_message();
|
||||
|
@ -3392,7 +3392,7 @@ int JOIN_TAB_SCAN::next()
|
|||
|
||||
while (!err && select && (skip_rc= select->skip_record(thd)) <= 0)
|
||||
{
|
||||
if (thd->check_killed() || skip_rc < 0)
|
||||
if (unlikely(thd->check_killed()) || skip_rc < 0)
|
||||
return 1;
|
||||
/*
|
||||
Move to the next record if the last retrieved record does not
|
||||
|
|
sql/sql_lex.cc: 417 changes (file diff suppressed because it is too large)
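The suppressed sql_lex.cc diff presumably applies the same transformation as the hunks shown above and below: error and out-of-memory checks get wrapped in unlikely(), success checks in likely(). As a minimal sketch of how such branch hints are commonly defined and used (the macro names match the commit, but this particular definition is an illustrative assumption based on the usual __builtin_expect idiom, not a copy of the server headers):

/* Hedged sketch: typical definition of the branch-hint macros. */
#if defined(__GNUC__) || defined(__clang__)
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x)   (x)   /* no-op fallback: hints never change behaviour */
#define unlikely(x) (x)
#endif

/* Usage mirroring the hunks in this commit: the error path is marked cold,
   so the compiler lays out the success path as the straight-line hot code. */
static int example_read_row(int error)
{
  if (unlikely(error))
    return 1;                        /* cold: report the failure */
  return 0;                          /* hot: continue normal processing */
}

On compilers without __builtin_expect the macros degrade to plain expressions, so the hints only affect code layout, never semantics.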
|
@ -593,7 +593,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
|
|||
*ex->field_term, *ex->line_start,
|
||||
*ex->line_term, *ex->enclosed,
|
||||
info.escape_char, read_file_from_client, is_fifo);
|
||||
if (read_info.error)
|
||||
if (unlikely(read_info.error))
|
||||
{
|
||||
if (file >= 0)
|
||||
mysql_file_close(file, MYF(0)); // no files in net reading
|
||||
|
@ -625,7 +625,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
|
|||
}
|
||||
|
||||
thd_proc_info(thd, "Reading file");
|
||||
if (!(error= MY_TEST(read_info.error)))
|
||||
if (likely(!(error= MY_TEST(read_info.error))))
|
||||
{
|
||||
table->reset_default_fields();
|
||||
table->next_number_field=table->found_next_number_field;
|
||||
|
@ -662,7 +662,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
|
|||
*ex->enclosed, skip_lines, ignore);
|
||||
|
||||
thd_proc_info(thd, "End bulk insert");
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
thd_progress_next_stage(thd);
|
||||
if (thd->locked_tables_mode <= LTM_LOCK_TABLES &&
|
||||
table->file->ha_end_bulk_insert() && !error)
|
||||
|
@ -787,7 +787,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
|
|||
*/
|
||||
error= error || mysql_bin_log.get_log_file()->error;
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
}
|
||||
#endif /*!EMBEDDED_LIBRARY*/
|
||||
|
@ -1110,11 +1110,11 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
|
|||
}
|
||||
}
|
||||
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
read_info.error= 1;
|
||||
|
||||
if (read_info.error)
|
||||
if (unlikely(read_info.error))
|
||||
break;
|
||||
|
||||
if (skip_lines)
|
||||
{
|
||||
skip_lines--;
|
||||
|
@ -1129,16 +1129,16 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
|
|||
{
|
||||
Load_data_outvar *dst= item->get_load_data_outvar_or_error();
|
||||
DBUG_ASSERT(dst);
|
||||
if (dst->load_data_set_no_data(thd, &read_info))
|
||||
if (unlikely(dst->load_data_set_no_data(thd, &read_info)))
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (thd->killed ||
|
||||
fill_record_n_invoke_before_triggers(thd, table, set_fields,
|
||||
set_values,
|
||||
ignore_check_option_errors,
|
||||
TRG_EVENT_INSERT))
|
||||
if (unlikely(thd->killed) ||
|
||||
unlikely(fill_record_n_invoke_before_triggers(thd, table, set_fields,
|
||||
set_values,
|
||||
ignore_check_option_errors,
|
||||
TRG_EVENT_INSERT)))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
switch (table_list->view_check_option(thd,
|
||||
|
@ -1247,7 +1247,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
|
|||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
if (read_info.error)
|
||||
if (unlikely(read_info.error))
|
||||
break;
|
||||
|
||||
if (skip_lines)
|
||||
|
|
sql/sql_parse.cc: 146 changes
|
@ -952,7 +952,7 @@ static char *fgets_fn(char *buffer, size_t size, fgets_input_t input, int *error
|
|||
{
|
||||
MYSQL_FILE *in= static_cast<MYSQL_FILE*> (input);
|
||||
char *line= mysql_file_fgets(buffer, (int)size, in);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
*error= (line == NULL) ? ferror(in->m_file) : 0;
|
||||
return line;
|
||||
}
|
||||
|
@ -1066,7 +1066,7 @@ static void handle_bootstrap_impl(THD *thd)
|
|||
#endif
|
||||
delete_explain_query(thd->lex);
|
||||
|
||||
if (bootstrap_error)
|
||||
if (unlikely(bootstrap_error))
|
||||
break;
|
||||
|
||||
thd->reset_kill_query(); /* Ensure that killed_errmsg is released */
|
||||
|
@ -1288,7 +1288,7 @@ bool do_command(THD *thd)
|
|||
}
|
||||
#endif /* WITH_WSREP */
|
||||
|
||||
if (packet_length == packet_error)
|
||||
if (unlikely(packet_length == packet_error))
|
||||
{
|
||||
DBUG_PRINT("info",("Got error %d reading command from socket %s",
|
||||
net->error,
|
||||
|
@ -1686,8 +1686,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
|
|||
{
|
||||
LEX_CSTRING tmp;
|
||||
status_var_increment(thd->status_var.com_stat[SQLCOM_CHANGE_DB]);
|
||||
if (thd->copy_with_error(system_charset_info, (LEX_STRING*) &tmp,
|
||||
thd->charset(), packet, packet_length))
|
||||
if (unlikely(thd->copy_with_error(system_charset_info, (LEX_STRING*) &tmp,
|
||||
thd->charset(), packet, packet_length)))
|
||||
break;
|
||||
if (!mysql_change_db(thd, &tmp, FALSE))
|
||||
{
|
||||
|
@ -1819,7 +1819,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
|
|||
thd->m_digest= & thd->m_digest_state;
|
||||
thd->m_digest->reset(thd->m_token_array, max_digest_length);
|
||||
|
||||
if (alloc_query(thd, packet, packet_length))
|
||||
if (unlikely(alloc_query(thd, packet, packet_length)))
|
||||
break; // fatal error is set
|
||||
MYSQL_QUERY_START(thd->query(), thd->thread_id,
|
||||
thd->get_db(),
|
||||
|
@ -1835,7 +1835,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
|
|||
thd->query_length());
|
||||
|
||||
Parser_state parser_state;
|
||||
if (parser_state.init(thd, thd->query(), thd->query_length()))
|
||||
if (unlikely(parser_state.init(thd, thd->query(), thd->query_length())))
|
||||
break;
|
||||
|
||||
if (WSREP_ON)
|
||||
|
@ -2406,7 +2406,7 @@ com_multi_end:
|
|||
if (drop_more_results)
|
||||
thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
|
||||
|
||||
if (!thd->is_error() && !thd->killed_errno())
|
||||
if (likely(!thd->is_error() && !thd->killed_errno()))
|
||||
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_RESULT, 0, 0);
|
||||
|
||||
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS,
|
||||
|
@ -3933,7 +3933,7 @@ mysql_execute_command(THD *thd)
|
|||
{
|
||||
/* New replication created */
|
||||
mi= new Master_info(&lex_mi->connection_name, relay_log_recovery);
|
||||
if (!mi || mi->error())
|
||||
if (unlikely(!mi || mi->error()))
|
||||
{
|
||||
delete mi;
|
||||
res= 1;
|
||||
|
@ -4043,7 +4043,7 @@ mysql_execute_command(THD *thd)
|
|||
copy.
|
||||
*/
|
||||
Alter_info alter_info(lex->alter_info, thd->mem_root);
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
{
|
||||
/* If out of memory when creating a copy of alter_info. */
|
||||
res= 1;
|
||||
|
@ -4185,7 +4185,7 @@ mysql_execute_command(THD *thd)
|
|||
thd->lex->create_info.options|= create_info.options;
|
||||
res= open_and_lock_tables(thd, create_info, lex->query_tables, TRUE, 0);
|
||||
thd->lex->create_info.options= save_thd_create_info_options;
|
||||
if (res)
|
||||
if (unlikely(res))
|
||||
{
|
||||
/* Got error or warning. Set res to 1 if error */
|
||||
if (!(res= thd->is_error()))
|
||||
|
@ -4197,9 +4197,9 @@ mysql_execute_command(THD *thd)
|
|||
if (create_info.or_replace() && !create_info.tmp_table())
|
||||
{
|
||||
TABLE_LIST *duplicate;
|
||||
if ((duplicate= unique_table(thd, lex->query_tables,
|
||||
lex->query_tables->next_global,
|
||||
0)))
|
||||
if (unlikely((duplicate= unique_table(thd, lex->query_tables,
|
||||
lex->query_tables->next_global,
|
||||
0))))
|
||||
{
|
||||
update_non_unique_table_error(lex->query_tables, "CREATE",
|
||||
duplicate);
|
||||
|
@ -4223,13 +4223,14 @@ mysql_execute_command(THD *thd)
|
|||
needs to be created for every execution of a PS/SP.
|
||||
Note: In wsrep-patch, CTAS is handled like a regular transaction.
|
||||
*/
|
||||
if ((result= new (thd->mem_root) select_create(thd, create_table,
|
||||
&create_info,
|
||||
&alter_info,
|
||||
select_lex->item_list,
|
||||
lex->duplicates,
|
||||
lex->ignore,
|
||||
select_tables)))
|
||||
if (unlikely((result= new (thd->mem_root)
|
||||
select_create(thd, create_table,
|
||||
&create_info,
|
||||
&alter_info,
|
||||
select_lex->item_list,
|
||||
lex->duplicates,
|
||||
lex->ignore,
|
||||
select_tables))))
|
||||
{
|
||||
/*
|
||||
CREATE from SELECT give its SELECT_LEX for SELECT,
|
||||
|
@ -4310,7 +4311,7 @@ end_with_restore_list:
|
|||
HA_CREATE_INFO create_info;
|
||||
Alter_info alter_info(lex->alter_info, thd->mem_root);
|
||||
|
||||
if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */
|
||||
if (unlikely(thd->is_fatal_error)) /* out of memory creating alter_info */
|
||||
goto error;
|
||||
|
||||
DBUG_ASSERT(first_table == all_tables && first_table != 0);
|
||||
|
@ -4347,8 +4348,8 @@ end_with_restore_list:
|
|||
We don't need to ensure that only one user is using master_info
|
||||
as start_slave is protected against simultaneous usage
|
||||
*/
|
||||
if ((mi= get_master_info(&lex_mi->connection_name,
|
||||
Sql_condition::WARN_LEVEL_ERROR)))
|
||||
if (unlikely((mi= get_master_info(&lex_mi->connection_name,
|
||||
Sql_condition::WARN_LEVEL_ERROR))))
|
||||
{
|
||||
if (load_error)
|
||||
{
|
||||
|
@ -4961,17 +4962,17 @@ end_with_restore_list:
|
|||
break;
|
||||
|
||||
MYSQL_MULTI_DELETE_START(thd->query());
|
||||
if ((res= mysql_multi_delete_prepare(thd)))
|
||||
if (unlikely(res= mysql_multi_delete_prepare(thd)))
|
||||
{
|
||||
MYSQL_MULTI_DELETE_DONE(1, 0);
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (!thd->is_fatal_error)
|
||||
if (likely(!thd->is_fatal_error))
|
||||
{
|
||||
result= new (thd->mem_root) multi_delete(thd, aux_tables,
|
||||
lex->table_count);
|
||||
if (result)
|
||||
if (unlikely(result))
|
||||
{
|
||||
res= mysql_select(thd,
|
||||
select_lex->get_table_list(),
|
||||
|
@ -5132,9 +5133,9 @@ end_with_restore_list:
|
|||
if ((check_table_access(thd, SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE)
|
||||
|| open_and_lock_tables(thd, all_tables, TRUE, 0)))
|
||||
goto error;
|
||||
if (!(res= sql_set_variables(thd, lex_var_list, true)))
|
||||
if (likely(!(res= sql_set_variables(thd, lex_var_list, true))))
|
||||
{
|
||||
if (!thd->is_error())
|
||||
if (likely(!thd->is_error()))
|
||||
my_ok(thd);
|
||||
}
|
||||
else
|
||||
|
@ -6226,7 +6227,7 @@ end_with_restore_list:
|
|||
|
||||
WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
|
||||
|
||||
if ((error= alter_server(thd, &lex->server_options)))
|
||||
if (unlikely((error= alter_server(thd, &lex->server_options))))
|
||||
{
|
||||
DBUG_PRINT("info", ("problem altering server <%s>",
|
||||
lex->server_options.server_name.str));
|
||||
|
@ -6319,7 +6320,8 @@ finish:
|
|||
}
|
||||
thd->reset_kill_query();
|
||||
}
|
||||
if (thd->is_error() || (thd->variables.option_bits & OPTION_MASTER_SQL_ERROR))
|
||||
if (unlikely(thd->is_error()) ||
|
||||
(thd->variables.option_bits & OPTION_MASTER_SQL_ERROR))
|
||||
{
|
||||
THD_STAGE_INFO(thd, stage_rollback);
|
||||
trans_rollback_stmt(thd);
|
||||
|
@ -6467,7 +6469,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
|
|||
to prepend EXPLAIN to any query and receive output for it,
|
||||
even if the query itself redirects the output.
|
||||
*/
|
||||
if (!(result= new (thd->mem_root) select_send(thd)))
|
||||
if (unlikely(!(result= new (thd->mem_root) select_send(thd))))
|
||||
return 1; /* purecov: inspected */
|
||||
thd->send_explain_fields(result, lex->describe, lex->analyze_stmt);
|
||||
|
||||
|
@ -6478,7 +6480,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
|
|||
res= mysql_explain_union(thd, &lex->unit, result);
|
||||
|
||||
/* Print EXPLAIN only if we don't have an error */
|
||||
if (!res)
|
||||
if (likely(!res))
|
||||
{
|
||||
/*
|
||||
Do like the original select_describe did: remove OFFSET from the
|
||||
|
@ -6689,7 +6691,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
|
|||
/* check access may be called twice in a row. Don't change to same stage */
|
||||
if (thd->proc_info != stage_checking_permissions.m_name)
|
||||
THD_STAGE_INFO(thd, stage_checking_permissions);
|
||||
if ((!db || !db[0]) && !thd->db.str && !dont_check_global_grants)
|
||||
if (unlikely((!db || !db[0]) && !thd->db.str && !dont_check_global_grants))
|
||||
{
|
||||
DBUG_PRINT("error",("No database"));
|
||||
if (!no_errors)
|
||||
|
@ -6698,7 +6700,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
|
|||
DBUG_RETURN(TRUE); /* purecov: tested */
|
||||
}
|
||||
|
||||
if ((db != NULL) && (db != any_db))
|
||||
if (likely((db != NULL) && (db != any_db)))
|
||||
{
|
||||
/*
|
||||
Check if this is reserved database, like information schema or
|
||||
|
@ -6768,8 +6770,8 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
|
|||
*save_priv|= sctx->master_access;
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
if (((want_access & ~sctx->master_access) & ~DB_ACLS) ||
|
||||
(! db && dont_check_global_grants))
|
||||
if (unlikely(((want_access & ~sctx->master_access) & ~DB_ACLS) ||
|
||||
(! db && dont_check_global_grants)))
|
||||
{ // We can never grant this
|
||||
DBUG_PRINT("error",("No possible access"));
|
||||
if (!no_errors)
|
||||
|
@ -6785,7 +6787,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
|
|||
DBUG_RETURN(TRUE); /* purecov: tested */
|
||||
}
|
||||
|
||||
if (db == any_db)
|
||||
if (unlikely(db == any_db))
|
||||
{
|
||||
/*
|
||||
Access granted; Allow select on *any* db.
|
||||
|
@ -7279,7 +7281,7 @@ bool check_global_access(THD *thd, ulong want_access, bool no_errors)
|
|||
char command[128];
|
||||
if ((thd->security_ctx->master_access & want_access))
|
||||
return 0;
|
||||
if (!no_errors)
|
||||
if (unlikely(!no_errors))
|
||||
{
|
||||
get_privilege_desc(command, sizeof(command), want_access);
|
||||
my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command);
|
||||
|
@ -7526,7 +7528,7 @@ void THD::reset_for_next_command(bool do_clear_error)
|
|||
DBUG_ASSERT(!spcont); /* not for substatements of routines */
|
||||
DBUG_ASSERT(!in_sub_stmt);
|
||||
|
||||
if (do_clear_error)
|
||||
if (likely(do_clear_error))
|
||||
clear_error(1);
|
||||
|
||||
free_list= 0;
|
||||
|
@ -7953,7 +7955,7 @@ void mysql_parse(THD *thd, char *rawbuf, uint length,
|
|||
|
||||
bool err= parse_sql(thd, parser_state, NULL, true);
|
||||
|
||||
if (!err)
|
||||
if (likely(!err))
|
||||
{
|
||||
thd->m_statement_psi=
|
||||
MYSQL_REFINE_STATEMENT(thd->m_statement_psi,
|
||||
|
@ -7968,7 +7970,7 @@ void mysql_parse(THD *thd, char *rawbuf, uint length,
|
|||
else
|
||||
#endif
|
||||
{
|
||||
if (! thd->is_error())
|
||||
if (likely(! thd->is_error()))
|
||||
{
|
||||
const char *found_semicolon= parser_state->m_lip.found_semicolon;
|
||||
/*
|
||||
|
@ -8062,7 +8064,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *rawbuf, uint length)
|
|||
DBUG_ENTER("mysql_test_parse_for_slave");
|
||||
|
||||
Parser_state parser_state;
|
||||
if (!(error= parser_state.init(thd, rawbuf, length)))
|
||||
if (likely(!(error= parser_state.init(thd, rawbuf, length))))
|
||||
{
|
||||
lex_start(thd);
|
||||
thd->reset_for_next_command();
|
||||
|
@ -8084,7 +8086,7 @@ add_proc_to_list(THD* thd, Item *item)
|
|||
ORDER *order;
|
||||
Item **item_ptr;
|
||||
|
||||
if (!(order = (ORDER *) thd->alloc(sizeof(ORDER)+sizeof(Item*))))
|
||||
if (unlikely(!(order = (ORDER *) thd->alloc(sizeof(ORDER)+sizeof(Item*)))))
|
||||
return 1;
|
||||
item_ptr = (Item**) (order+1);
|
||||
*item_ptr= item;
|
||||
|
@ -8102,7 +8104,7 @@ bool add_to_list(THD *thd, SQL_I_List<ORDER> &list, Item *item,bool asc)
|
|||
{
|
||||
ORDER *order;
|
||||
DBUG_ENTER("add_to_list");
|
||||
if (!(order = (ORDER *) thd->alloc(sizeof(ORDER))))
|
||||
if (unlikely(!(order = (ORDER *) thd->alloc(sizeof(ORDER)))))
|
||||
DBUG_RETURN(1);
|
||||
order->item_ptr= item;
|
||||
order->item= &order->item_ptr;
|
||||
|
@ -8151,19 +8153,19 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
|
|||
LEX *lex= thd->lex;
|
||||
DBUG_ENTER("add_table_to_list");
|
||||
|
||||
if (!table)
|
||||
if (unlikely(!table))
|
||||
DBUG_RETURN(0); // End of memory
|
||||
alias_str= alias ? *alias : table->table;
|
||||
DBUG_ASSERT(alias_str.str);
|
||||
if (!MY_TEST(table_options & TL_OPTION_ALIAS) &&
|
||||
check_table_name(table->table.str, table->table.length, FALSE))
|
||||
unlikely(check_table_name(table->table.str, table->table.length, FALSE)))
|
||||
{
|
||||
my_error(ER_WRONG_TABLE_NAME, MYF(0), table->table.str);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
if (table->is_derived_table() == FALSE && table->db.str &&
|
||||
check_db_name((LEX_STRING*) &table->db))
|
||||
if (unlikely(table->is_derived_table() == FALSE && table->db.str &&
|
||||
check_db_name((LEX_STRING*) &table->db)))
|
||||
{
|
||||
my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str);
|
||||
DBUG_RETURN(0);
|
||||
|
@ -8171,17 +8173,17 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
|
|||
|
||||
if (!alias) /* Alias is case sensitive */
|
||||
{
|
||||
if (table->sel)
|
||||
if (unlikely(table->sel))
|
||||
{
|
||||
my_message(ER_DERIVED_MUST_HAVE_ALIAS,
|
||||
ER_THD(thd, ER_DERIVED_MUST_HAVE_ALIAS), MYF(0));
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
/* alias_str points to table->table; Let's make a copy */
|
||||
if (!(alias_str.str= (char*) thd->memdup(alias_str.str, alias_str.length+1)))
|
||||
if (unlikely(!(alias_str.str= (char*) thd->memdup(alias_str.str, alias_str.length+1))))
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))))
|
||||
if (unlikely(!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST)))))
|
||||
DBUG_RETURN(0); /* purecov: inspected */
|
||||
if (table->db.str)
|
||||
{
|
||||
|
@ -8227,7 +8229,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
|
|||
DBUG_RETURN(0);
|
||||
}
|
||||
schema_table= find_schema_table(thd, &ptr->table_name);
|
||||
if (!schema_table ||
|
||||
if (unlikely(!schema_table) ||
|
||||
(schema_table->hidden &&
|
||||
((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
|
||||
/*
|
||||
|
@ -8261,8 +8263,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
|
|||
tables ;
|
||||
tables=tables->next_local)
|
||||
{
|
||||
if (!my_strcasecmp(table_alias_charset, alias_str.str, tables->alias.str) &&
|
||||
!cmp(&ptr->db, &tables->db) && ! tables->sequence)
|
||||
if (unlikely(!my_strcasecmp(table_alias_charset, alias_str.str,
|
||||
tables->alias.str) &&
|
||||
!cmp(&ptr->db, &tables->db) && ! tables->sequence))
|
||||
{
|
||||
my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str.str); /* purecov: tested */
|
||||
DBUG_RETURN(0); /* purecov: tested */
|
||||
|
@ -8270,7 +8273,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
|
|||
}
|
||||
}
|
||||
/* Store the table reference preceding the current one. */
|
||||
if (table_list.elements > 0 && !ptr->sequence)
|
||||
if (table_list.elements > 0 && likely(!ptr->sequence))
|
||||
{
|
||||
/*
|
||||
table_list.next points to the last inserted TABLE_LIST->next_local'
|
||||
|
@ -8298,7 +8301,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
|
|||
We don't store sequences into the local list to hide them from INSERT
|
||||
and SELECT.
|
||||
*/
|
||||
if (!ptr->sequence)
|
||||
if (likely(!ptr->sequence))
|
||||
table_list.link_in_list(ptr, &ptr->next_local);
|
||||
ptr->next_name_resolution_table= NULL;
|
||||
#ifdef WITH_PARTITION_STORAGE_ENGINE
|
||||
|
@ -8343,13 +8346,14 @@ bool st_select_lex::init_nested_join(THD *thd)
|
|||
NESTED_JOIN *nested_join;
|
||||
DBUG_ENTER("init_nested_join");
|
||||
|
||||
if (!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+
|
||||
sizeof(NESTED_JOIN))))
|
||||
if (unlikely(!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+
|
||||
sizeof(NESTED_JOIN)))))
|
||||
DBUG_RETURN(1);
|
||||
nested_join= ptr->nested_join=
|
||||
((NESTED_JOIN*) ((uchar*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST))));
|
||||
|
||||
join_list->push_front(ptr, thd->mem_root);
|
||||
if (unlikely(join_list->push_front(ptr, thd->mem_root)))
|
||||
DBUG_RETURN(1);
|
||||
ptr->embedding= embedding;
|
||||
ptr->join_list= join_list;
|
||||
ptr->alias.str="(nested_join)";
|
||||
|
@ -8425,8 +8429,8 @@ TABLE_LIST *st_select_lex::nest_last_join(THD *thd)
|
|||
List<TABLE_LIST> *embedded_list;
|
||||
DBUG_ENTER("nest_last_join");
|
||||
|
||||
if (!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+
|
||||
sizeof(NESTED_JOIN))))
|
||||
if (unlikely(!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+
|
||||
sizeof(NESTED_JOIN)))))
|
||||
DBUG_RETURN(0);
|
||||
nested_join= ptr->nested_join=
|
||||
((NESTED_JOIN*) ((uchar*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST))));
|
||||
|
@ -8441,7 +8445,7 @@ TABLE_LIST *st_select_lex::nest_last_join(THD *thd)
|
|||
for (uint i=0; i < 2; i++)
|
||||
{
|
||||
TABLE_LIST *table= join_list->pop();
|
||||
if (!table)
|
||||
if (unlikely(!table))
|
||||
DBUG_RETURN(NULL);
|
||||
table->join_list= embedded_list;
|
||||
table->embedding= ptr;
|
||||
|
@ -8932,7 +8936,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user,
|
|||
|
||||
*rows= 0;
|
||||
|
||||
if (thd->is_fatal_error) // If we run out of memory
|
||||
if (unlikely(thd->is_fatal_error)) // If we run out of memory
|
||||
DBUG_RETURN(ER_OUT_OF_RESOURCES);
|
||||
|
||||
DBUG_PRINT("enter", ("user: %s signal: %u", user->user.str,
|
||||
|
@ -9002,7 +9006,7 @@ static
|
|||
void sql_kill(THD *thd, longlong id, killed_state state, killed_type type)
|
||||
{
|
||||
uint error;
|
||||
if (!(error= kill_one_thread(thd, id, state, type)))
|
||||
if (likely(!(error= kill_one_thread(thd, id, state, type))))
|
||||
{
|
||||
if (!thd->killed)
|
||||
my_ok(thd);
|
||||
|
@ -9019,7 +9023,7 @@ void sql_kill_user(THD *thd, LEX_USER *user, killed_state state)
|
|||
{
|
||||
uint error;
|
||||
ha_rows rows;
|
||||
if (!(error= kill_threads_for_user(thd, user, state, &rows)))
|
||||
if (likely(!(error= kill_threads_for_user(thd, user, state, &rows))))
|
||||
my_ok(thd, rows);
|
||||
else
|
||||
{
|
||||
|
@ -9051,7 +9055,8 @@ bool append_file_to_dir(THD *thd, const char **filename_ptr,
|
|||
/* Fix is using unix filename format on dos */
|
||||
strmov(buff,*filename_ptr);
|
||||
end=convert_dirname(buff, *filename_ptr, NullS);
|
||||
if (!(ptr= (char*) thd->alloc((size_t) (end-buff) + table_name->length + 1)))
|
||||
if (unlikely(!(ptr= (char*) thd->alloc((size_t) (end-buff) +
|
||||
table_name->length + 1))))
|
||||
return 1; // End of memory
|
||||
*filename_ptr=ptr;
|
||||
strxmov(ptr,buff,table_name->str,NullS);
|
||||
|
@ -9697,7 +9702,7 @@ LEX_USER *create_default_definer(THD *thd, bool role)
|
|||
{
|
||||
LEX_USER *definer;
|
||||
|
||||
if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER))))
|
||||
if (unlikely(! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER)))))
|
||||
return 0;
|
||||
|
||||
thd->get_definer(definer, role);
|
||||
|
@ -9732,7 +9737,7 @@ LEX_USER *create_definer(THD *thd, LEX_CSTRING *user_name,
|
|||
|
||||
/* Create and initialize. */
|
||||
|
||||
if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER))))
|
||||
if (unlikely(!(definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER)))))
|
||||
return 0;
|
||||
|
||||
definer->user= *user_name;
|
||||
|
@ -9795,7 +9800,8 @@ bool check_string_char_length(const LEX_CSTRING *str, uint err_msg,
|
|||
bool no_error)
|
||||
{
|
||||
Well_formed_prefix prefix(cs, str->str, str->length, max_char_length);
|
||||
if (!prefix.well_formed_error_pos() && str->length == prefix.length())
|
||||
if (likely(!prefix.well_formed_error_pos() &&
|
||||
str->length == prefix.length()))
|
||||
return FALSE;
|
||||
|
||||
if (!no_error)
|
||||
|
|
|
@ -820,7 +820,7 @@ int check_signed_flag(partition_info *part_info)
|
|||
*/
|
||||
|
||||
static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
|
||||
bool is_sub_part, bool is_create_table_ind)
|
||||
bool is_sub_part, bool is_create_table_ind)
|
||||
{
|
||||
partition_info *part_info= table->part_info;
|
||||
bool result= TRUE;
|
||||
|
@ -857,7 +857,7 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
|
|||
const nesting_map saved_allow_sum_func= thd->lex->allow_sum_func;
|
||||
thd->lex->allow_sum_func= 0;
|
||||
|
||||
if (!(error= func_expr->fix_fields(thd, (Item**)&func_expr)))
|
||||
if (likely(!(error= func_expr->fix_fields(thd, (Item**)&func_expr))))
|
||||
func_expr->walk(&Item::vcol_in_partition_func_processor, 0, NULL);
|
||||
|
||||
/*
|
||||
|
@ -901,7 +901,7 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
|
|||
ER_THD(thd, ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR));
|
||||
}
|
||||
|
||||
if ((!is_sub_part) && (error= check_signed_flag(part_info)))
|
||||
if (unlikely((!is_sub_part) && (error= check_signed_flag(part_info))))
|
||||
goto end;
|
||||
result= set_up_field_array(thd, table, is_sub_part);
|
||||
end:
|
||||
|
@ -2823,10 +2823,9 @@ static inline int part_val_int(Item *item_expr, longlong *result)
|
|||
*result= item_expr->val_int();
|
||||
if (item_expr->null_value)
|
||||
{
|
||||
if (current_thd->is_error())
|
||||
if (unlikely(current_thd->is_error()))
|
||||
return TRUE;
|
||||
else
|
||||
*result= LONGLONG_MIN;
|
||||
*result= LONGLONG_MIN;
|
||||
}
|
||||
return FALSE;
|
||||
}
|
||||
|
@ -3534,7 +3533,7 @@ int get_partition_id_range(partition_info *part_info,
|
|||
bool unsigned_flag= part_info->part_expr->unsigned_flag;
|
||||
DBUG_ENTER("get_partition_id_range");
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
|
||||
|
||||
if (part_info->part_expr->null_value)
|
||||
|
@ -4112,7 +4111,7 @@ bool verify_data_with_partition(TABLE *table, TABLE *part_table,
|
|||
|
||||
do
|
||||
{
|
||||
if ((error= file->ha_rnd_next(table->record[0])))
|
||||
if (unlikely((error= file->ha_rnd_next(table->record[0]))))
|
||||
{
|
||||
if (error == HA_ERR_RECORD_DELETED)
|
||||
continue;
|
||||
|
@ -4122,8 +4121,8 @@ bool verify_data_with_partition(TABLE *table, TABLE *part_table,
|
|||
file->print_error(error, MYF(0));
|
||||
break;
|
||||
}
|
||||
if ((error= part_info->get_partition_id(part_info, &found_part_id,
|
||||
&func_value)))
|
||||
if (unlikely((error= part_info->get_partition_id(part_info, &found_part_id,
|
||||
&func_value))))
|
||||
{
|
||||
part_table->file->print_error(error, MYF(0));
|
||||
break;
|
||||
|
@ -4141,9 +4140,7 @@ err:
|
|||
part_info->table->move_fields(part_info->full_part_field_array, old_rec,
|
||||
table->record[0]);
|
||||
part_table->record[0]= old_rec;
|
||||
if (error)
|
||||
DBUG_RETURN(TRUE);
|
||||
DBUG_RETURN(FALSE);
|
||||
DBUG_RETURN(unlikely(error) ? TRUE : FALSE);
|
||||
}
|
||||
|
||||
|
||||
|
@ -6001,9 +5998,11 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
|
|||
|
||||
/* TODO: test if bulk_insert would increase the performance */
|
||||
|
||||
if ((error= file->ha_change_partitions(lpt->create_info, path, &lpt->copied,
|
||||
&lpt->deleted, lpt->pack_frm_data,
|
||||
lpt->pack_frm_len)))
|
||||
if (unlikely((error= file->ha_change_partitions(lpt->create_info, path,
|
||||
&lpt->copied,
|
||||
&lpt->deleted,
|
||||
lpt->pack_frm_data,
|
||||
lpt->pack_frm_len))))
|
||||
{
|
||||
file->print_error(error, MYF(error != ER_OUTOFMEMORY ? 0 : ME_FATALERROR));
|
||||
}
|
||||
|
@ -6041,7 +6040,7 @@ static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
|
|||
DBUG_ENTER("mysql_rename_partitions");
|
||||
|
||||
build_table_filename(path, sizeof(path) - 1, lpt->db.str, lpt->table_name.str, "", 0);
|
||||
if ((error= lpt->table->file->ha_rename_partitions(path)))
|
||||
if (unlikely((error= lpt->table->file->ha_rename_partitions(path))))
|
||||
{
|
||||
if (error != 1)
|
||||
lpt->table->file->print_error(error, MYF(0));
|
||||
|
@ -6778,14 +6777,14 @@ static void alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt)
|
|||
Diagnostics_area *stmt_da= NULL;
|
||||
Diagnostics_area tmp_stmt_da(true);
|
||||
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
/* reopen might fail if we have a previous error, use a temporary da. */
|
||||
stmt_da= thd->get_stmt_da();
|
||||
thd->set_stmt_da(&tmp_stmt_da);
|
||||
}
|
||||
|
||||
if (thd->locked_tables_list.reopen_tables(thd, false))
|
||||
if (unlikely(thd->locked_tables_list.reopen_tables(thd, false)))
|
||||
sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE");
|
||||
|
||||
if (stmt_da)
|
||||
|
@ -6984,14 +6983,14 @@ err_exclusive_lock:
|
|||
Diagnostics_area *stmt_da= NULL;
|
||||
Diagnostics_area tmp_stmt_da(true);
|
||||
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
/* reopen might fail if we have a previous error, use a temporary da. */
|
||||
stmt_da= thd->get_stmt_da();
|
||||
thd->set_stmt_da(&tmp_stmt_da);
|
||||
}
|
||||
|
||||
if (thd->locked_tables_list.reopen_tables(thd, false))
|
||||
if (unlikely(thd->locked_tables_list.reopen_tables(thd, false)))
|
||||
sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE");
|
||||
|
||||
if (stmt_da)
|
||||
|
|
|
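The exchange-partition hunks below wrap each access check in its own unlikely() rather than annotating the whole disjunction once. A small self-contained sketch of the two equivalent spellings; check_a() and check_b() are illustrative stand-ins, not functions from the server sources:

#ifndef unlikely
#define unlikely(x) (x)   /* no-op fallback if the hint macro is not in scope */
#endif

static bool check_a(void) { return false; }  /* stand-in for check_access() */
static bool check_b(void) { return false; }  /* stand-in for check_grant()  */

static bool guarded_execute(void)
{
  /* Per-call hints, as in the hunk below: each cold branch is annotated. */
  if (unlikely(check_a()) || unlikely(check_b()))
    return true;                     /* cold: refuse execution */

  /* The other spelling puts a single hint on the combined condition:
     if (unlikely(check_a() || check_b())) ... */
  return false;                      /* hot: proceed with the statement */
}

Either form marks the failure branch as cold; this commit uses the per-call form here and the combined form elsewhere, depending on the shape of the condition.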
@ -67,25 +67,29 @@ bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd)
|
|||
|
||||
DBUG_ENTER("Sql_cmd_alter_table_exchange_partition::execute");
|
||||
|
||||
if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
{
|
||||
/* out of memory creating a copy of alter_info */
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
/* Must be set in the parser */
|
||||
DBUG_ASSERT(select_lex->db.str);
|
||||
/* also check the table to be exchanged with the partition */
|
||||
DBUG_ASSERT(alter_info.partition_flags & ALTER_PARTITION_EXCHANGE);
|
||||
|
||||
if (check_access(thd, priv_needed, first_table->db.str,
|
||||
&first_table->grant.privilege,
|
||||
&first_table->grant.m_internal,
|
||||
0, 0) ||
|
||||
check_access(thd, priv_needed, first_table->next_local->db.str,
|
||||
&first_table->next_local->grant.privilege,
|
||||
&first_table->next_local->grant.m_internal,
|
||||
0, 0))
|
||||
if (unlikely(check_access(thd, priv_needed, first_table->db.str,
|
||||
&first_table->grant.privilege,
|
||||
&first_table->grant.m_internal,
|
||||
0, 0)) ||
|
||||
unlikely(check_access(thd, priv_needed, first_table->next_local->db.str,
|
||||
&first_table->next_local->grant.privilege,
|
||||
&first_table->next_local->grant.m_internal,
|
||||
0, 0)))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
if (check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX, FALSE))
|
||||
if (unlikely(check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX,
|
||||
FALSE)))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
/* Not allowed with EXCHANGE PARTITION */
|
||||
|
@ -109,31 +113,32 @@ bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd)
|
|||
|
||||
@retval FALSE if OK, otherwise error is reported and TRUE is returned.
|
||||
*/
|
||||
|
||||
static bool check_exchange_partition(TABLE *table, TABLE *part_table)
|
||||
{
|
||||
DBUG_ENTER("check_exchange_partition");
|
||||
|
||||
/* Both tables must exist */
|
||||
if (!part_table || !table)
|
||||
if (unlikely(!part_table || !table))
|
||||
{
|
||||
my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0));
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
/* The first table must be partitioned, and the second must not */
|
||||
if (!part_table->part_info)
|
||||
if (unlikely(!part_table->part_info))
|
||||
{
|
||||
my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
if (table->part_info)
|
||||
if (unlikely(table->part_info))
|
||||
{
|
||||
my_error(ER_PARTITION_EXCHANGE_PART_TABLE, MYF(0),
|
||||
table->s->table_name.str);
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
if (part_table->file->ht != partition_hton)
|
||||
if (unlikely(part_table->file->ht != partition_hton))
|
||||
{
|
||||
/*
|
||||
Only allowed on partitioned tables throught the generic ha_partition
|
||||
|
@ -143,14 +148,14 @@ static bool check_exchange_partition(TABLE *table, TABLE *part_table)
|
|||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
if (table->file->ht != part_table->part_info->default_engine_type)
|
||||
if (unlikely(table->file->ht != part_table->part_info->default_engine_type))
|
||||
{
|
||||
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
/* Verify that table is not tmp table, partitioned tables cannot be tmp. */
|
||||
if (table->s->tmp_table != NO_TMP_TABLE)
|
||||
if (unlikely(table->s->tmp_table != NO_TMP_TABLE))
|
||||
{
|
||||
my_error(ER_PARTITION_EXCHANGE_TEMP_TABLE, MYF(0),
|
||||
table->s->table_name.str);
|
||||
|
@ -158,7 +163,7 @@ static bool check_exchange_partition(TABLE *table, TABLE *part_table)
|
|||
}
|
||||
|
||||
/* The table cannot have foreign keys constraints or be referenced */
|
||||
if(!table->file->can_switch_engines())
|
||||
if (unlikely(!table->file->can_switch_engines()))
|
||||
{
|
||||
my_error(ER_PARTITION_EXCHANGE_FOREIGN_KEY, MYF(0),
|
||||
table->s->table_name.str);
|
||||
|
@ -197,8 +202,8 @@ static bool compare_table_with_partition(THD *thd, TABLE *table,
|
|||
/* mark all columns used, since they are used when preparing the new table */
|
||||
part_table->use_all_columns();
|
||||
table->use_all_columns();
|
||||
if (mysql_prepare_alter_table(thd, part_table, &part_create_info,
|
||||
&part_alter_info, &part_alter_ctx))
|
||||
if (unlikely(mysql_prepare_alter_table(thd, part_table, &part_create_info,
|
||||
&part_alter_info, &part_alter_ctx)))
|
||||
{
|
||||
my_error(ER_TABLES_DIFFERENT_METADATA, MYF(0));
|
||||
DBUG_RETURN(TRUE);
|
||||
|
@ -336,7 +341,7 @@ static bool exchange_name_with_ddl_log(THD *thd,
|
|||
handler *file= NULL;
|
||||
DBUG_ENTER("exchange_name_with_ddl_log");
|
||||
|
||||
if (!(file= get_new_handler(NULL, thd->mem_root, ht)))
|
||||
if (unlikely(!(file= get_new_handler(NULL, thd->mem_root, ht))))
|
||||
{
|
||||
mem_alloc_error(sizeof(handler));
|
||||
DBUG_RETURN(TRUE);
|
||||
|
@ -360,12 +365,13 @@ static bool exchange_name_with_ddl_log(THD *thd,
|
|||
*/
|
||||
DBUG_EXECUTE_IF("exchange_partition_fail_1", goto err_no_action_written;);
|
||||
DBUG_EXECUTE_IF("exchange_partition_abort_1", DBUG_SUICIDE(););
|
||||
if (write_ddl_log_entry(&exchange_entry, &log_entry))
|
||||
if (unlikely(write_ddl_log_entry(&exchange_entry, &log_entry)))
|
||||
goto err_no_action_written;
|
||||
|
||||
DBUG_EXECUTE_IF("exchange_partition_fail_2", goto err_no_execute_written;);
|
||||
DBUG_EXECUTE_IF("exchange_partition_abort_2", DBUG_SUICIDE(););
|
||||
if (write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, &exec_log_entry))
|
||||
if (unlikely(write_execute_ddl_log_entry(log_entry->entry_pos, FALSE,
|
||||
&exec_log_entry)))
|
||||
goto err_no_execute_written;
|
||||
/* ddl_log is written and synced */
|
||||
|
||||
|
@ -383,7 +389,7 @@ static bool exchange_name_with_ddl_log(THD *thd,
|
|||
error_set= TRUE;
|
||||
goto err_rename;);
|
||||
DBUG_EXECUTE_IF("exchange_partition_abort_3", DBUG_SUICIDE(););
|
||||
if (file->ha_rename_table(name, tmp_name))
|
||||
if (unlikely(file->ha_rename_table(name, tmp_name)))
|
||||
{
|
||||
my_error(ER_ERROR_ON_RENAME, MYF(0), name, tmp_name, my_errno);
|
||||
error_set= TRUE;
|
||||
|
@ -391,7 +397,7 @@ static bool exchange_name_with_ddl_log(THD *thd,
|
|||
}
|
||||
DBUG_EXECUTE_IF("exchange_partition_fail_4", goto err_rename;);
|
||||
DBUG_EXECUTE_IF("exchange_partition_abort_4", DBUG_SUICIDE(););
|
||||
if (deactivate_ddl_log_entry(log_entry->entry_pos))
|
||||
if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos)))
|
||||
goto err_rename;
|
||||
|
||||
/* call rename table from partition to table */
|
||||
|
@ -400,7 +406,7 @@ static bool exchange_name_with_ddl_log(THD *thd,
|
|||
error_set= TRUE;
|
||||
goto err_rename;);
|
||||
DBUG_EXECUTE_IF("exchange_partition_abort_5", DBUG_SUICIDE(););
|
||||
if (file->ha_rename_table(from_name, name))
|
||||
if (unlikely(file->ha_rename_table(from_name, name)))
|
||||
{
|
||||
my_error(ER_ERROR_ON_RENAME, MYF(0), from_name, name, my_errno);
|
||||
error_set= TRUE;
|
||||
|
@ -408,7 +414,7 @@ static bool exchange_name_with_ddl_log(THD *thd,
|
|||
}
|
||||
DBUG_EXECUTE_IF("exchange_partition_fail_6", goto err_rename;);
|
||||
DBUG_EXECUTE_IF("exchange_partition_abort_6", DBUG_SUICIDE(););
|
||||
if (deactivate_ddl_log_entry(log_entry->entry_pos))
|
||||
if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos)))
|
||||
goto err_rename;
|
||||
|
||||
/* call rename table from tmp-nam to partition */
|
||||
|
@ -417,7 +423,7 @@ static bool exchange_name_with_ddl_log(THD *thd,
|
|||
error_set= TRUE;
|
||||
goto err_rename;);
|
||||
DBUG_EXECUTE_IF("exchange_partition_abort_7", DBUG_SUICIDE(););
|
||||
if (file->ha_rename_table(tmp_name, from_name))
|
||||
if (unlikely(file->ha_rename_table(tmp_name, from_name)))
|
||||
{
|
||||
my_error(ER_ERROR_ON_RENAME, MYF(0), tmp_name, from_name, my_errno);
|
||||
error_set= TRUE;
|
||||
|
@ -425,7 +431,7 @@ static bool exchange_name_with_ddl_log(THD *thd,
|
|||
}
|
||||
DBUG_EXECUTE_IF("exchange_partition_fail_8", goto err_rename;);
|
||||
DBUG_EXECUTE_IF("exchange_partition_abort_8", DBUG_SUICIDE(););
|
||||
if (deactivate_ddl_log_entry(log_entry->entry_pos))
|
||||
if (unlikely(deactivate_ddl_log_entry(log_entry->entry_pos)))
|
||||
goto err_rename;
|
||||
|
||||
/* The exchange is complete and ddl_log is deactivated */
|
||||
|
@ -525,23 +531,24 @@ bool Sql_cmd_alter_table_exchange_partition::
|
|||
to be able to verify the structure/metadata.
|
||||
*/
|
||||
table_list->mdl_request.set_type(MDL_SHARED_NO_WRITE);
|
||||
if (open_tables(thd, &table_list, &table_counter, 0,
|
||||
&alter_prelocking_strategy))
|
||||
if (unlikely(open_tables(thd, &table_list, &table_counter, 0,
|
||||
&alter_prelocking_strategy)))
|
||||
DBUG_RETURN(true);
|
||||
|
||||
part_table= table_list->table;
|
||||
swap_table= swap_table_list->table;
|
||||
|
||||
if (check_exchange_partition(swap_table, part_table))
|
||||
if (unlikely(check_exchange_partition(swap_table, part_table)))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
/* set lock pruning on first table */
|
||||
partition_name= alter_info->partition_names.head();
|
||||
if (table_list->table->part_info->
|
||||
set_named_partition_bitmap(partition_name, strlen(partition_name)))
|
||||
if (unlikely(table_list->table->part_info->
|
||||
set_named_partition_bitmap(partition_name,
|
||||
strlen(partition_name))))
|
||||
DBUG_RETURN(true);
|
||||
|
||||
if (lock_tables(thd, table_list, table_counter, 0))
|
||||
if (unlikely(lock_tables(thd, table_list, table_counter, 0)))
|
||||
DBUG_RETURN(true);
|
||||
|
||||
|
||||
|
@ -569,32 +576,35 @@ bool Sql_cmd_alter_table_exchange_partition::
|
|||
table_list->next_local->db.str,
|
||||
temp_name, "", FN_IS_TMP);
|
||||
|
||||
if (!(part_elem= part_table->part_info->get_part_elem(partition_name,
|
||||
part_file_name + part_file_name_len,
|
||||
sizeof(part_file_name) - part_file_name_len,
|
||||
&swap_part_id)))
|
||||
if (unlikely(!(part_elem=
|
||||
part_table->part_info->get_part_elem(partition_name,
|
||||
part_file_name +
|
||||
part_file_name_len,
|
||||
sizeof(part_file_name) -
|
||||
part_file_name_len,
|
||||
&swap_part_id))))
|
||||
{
|
||||
// my_error(ER_UNKNOWN_PARTITION, MYF(0), partition_name,
|
||||
// part_table->alias);
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
if (swap_part_id == NOT_A_PARTITION_ID)
|
||||
if (unlikely(swap_part_id == NOT_A_PARTITION_ID))
|
||||
{
|
||||
DBUG_ASSERT(part_table->part_info->is_sub_partitioned());
|
||||
my_error(ER_PARTITION_INSTEAD_OF_SUBPARTITION, MYF(0));
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
if (compare_table_with_partition(thd, swap_table, part_table, part_elem,
|
||||
swap_part_id))
|
||||
if (unlikely(compare_table_with_partition(thd, swap_table, part_table,
|
||||
part_elem,
|
||||
swap_part_id)))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
/* Table and partition has same structure/options, OK to exchange */
|
||||
|
||||
thd_proc_info(thd, "Verifying data with partition");
|
||||
|
||||
if (verify_data_with_partition(swap_table, part_table, swap_part_id))
|
||||
if (unlikely(verify_data_with_partition(swap_table, part_table,
|
||||
swap_part_id)))
|
||||
DBUG_RETURN(TRUE);
|
||||
|
||||
/*
|
||||
|
@ -621,8 +631,8 @@ bool Sql_cmd_alter_table_exchange_partition::
|
|||
|
||||
DEBUG_SYNC(thd, "swap_partition_before_rename");
|
||||
|
||||
if (exchange_name_with_ddl_log(thd, swap_file_name, part_file_name,
|
||||
temp_file_name, table_hton))
|
||||
if (unlikely(exchange_name_with_ddl_log(thd, swap_file_name, part_file_name,
|
||||
temp_file_name, table_hton)))
|
||||
goto err;
|
||||
|
||||
/*
|
||||
|
@ -632,7 +642,8 @@ bool Sql_cmd_alter_table_exchange_partition::
|
|||
*/
|
||||
(void) thd->locked_tables_list.reopen_tables(thd, false);
|
||||
|
||||
if ((error= write_bin_log(thd, TRUE, thd->query(), thd->query_length())))
|
||||
if (unlikely((error= write_bin_log(thd, TRUE, thd->query(),
|
||||
thd->query_length()))))
|
||||
{
|
||||
/*
|
||||
The error is reported in write_bin_log().
|
||||
|
@ -651,7 +662,7 @@ err:
|
|||
part_table_mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
|
||||
}
|
||||
|
||||
if (!error)
|
||||
if (unlikely(!error))
|
||||
my_ok(thd);
|
||||
|
||||
// For query cache
|
||||
|
@ -823,7 +834,8 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd)
|
|||
|
||||
partition= (ha_partition*) first_table->table->file;
|
||||
/* Invoke the handler method responsible for truncating the partition. */
|
||||
if ((error= partition->truncate_partition(alter_info, &binlog_stmt)))
|
||||
if (unlikely(error= partition->truncate_partition(alter_info,
|
||||
&binlog_stmt)))
|
||||
partition->print_error(error, MYF(0));
|
||||
|
||||
/*
|
||||
|
@ -836,7 +848,7 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd)
|
|||
Since we've changed data within the table, we also have to invalidate
|
||||
the query cache for it.
|
||||
*/
|
||||
if (error != HA_ERR_WRONG_COMMAND)
|
||||
if (likely(error != HA_ERR_WRONG_COMMAND))
|
||||
{
|
||||
query_cache_invalidate3(thd, first_table, FALSE);
|
||||
if (binlog_stmt)
|
||||
|
@ -851,7 +863,7 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd)
|
|||
if (thd->locked_tables_mode)
|
||||
ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
|
||||
|
||||
if (! error)
|
||||
if (likely(!error))
|
||||
my_ok(thd);
|
||||
|
||||
// Invalidate query cache
|
||||
|
|
|
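For reference only (this is not part of the patch itself): the likely()/unlikely() annotations added throughout these files are assumed to be thin wrappers over the GCC/Clang __builtin_expect() hint, roughly as in the sketch below; the exact macro names, guards and fallbacks in the server headers may differ in detail.

/* Minimal sketch, assuming a GCC/Clang-compatible compiler. */
#if defined(__GNUC__)
#define likely(x)   __builtin_expect(((x) != 0), 1)
#define unlikely(x) __builtin_expect(((x) != 0), 0)
#else
#define likely(x)   (x)   /* fallback: no branch hint on other compilers */
#define unlikely(x) (x)
#endif

With such hints the compiler keeps the annotated error branches out of the straight-line code layout, which is why this patch wraps error checks in unlikely() and success checks in likely().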
@ -1874,7 +1874,7 @@ static void plugin_load(MEM_ROOT *tmp_root)
free_root(tmp_root, MYF(MY_MARK_BLOCKS_FREE));
mysql_mutex_unlock(&LOCK_plugin);
}
if (error > 0)
if (unlikely(error > 0))
sql_print_error(ER_THD(new_thd, ER_GET_ERRNO), my_errno,
table->file->table_type());
end_read_record(&read_record_info);
@ -2154,7 +2154,7 @@ static bool finalize_install(THD *thd, TABLE *table, const LEX_CSTRING *name,
files_charset_info);
error= table->file->ha_write_row(table->record[0]);
reenable_binlog(thd);
if (error)
if (unlikely(error))
{
table->file->print_error(error, MYF(0));
tmp->state= PLUGIN_IS_DELETED;
@ -2219,7 +2219,7 @@ bool mysql_install_plugin(THD *thd, const LEX_CSTRING *name,
|
|||
|
||||
mysql_mutex_lock(&LOCK_plugin);
|
||||
error= plugin_add(thd->mem_root, name, &dl, REPORT_TO_USER);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
|
||||
if (name->str)
|
||||
|
@ -2235,7 +2235,7 @@ bool mysql_install_plugin(THD *thd, const LEX_CSTRING *name,
|
|||
}
|
||||
}
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
reap_needed= true;
|
||||
reap_plugins();
|
||||
|
@ -2298,7 +2298,7 @@ static bool do_uninstall(THD *thd, TABLE *table, const LEX_CSTRING *name)
|
|||
tmp_disable_binlog(thd);
|
||||
error= table->file->ha_delete_row(table->record[0]);
|
||||
reenable_binlog(thd);
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
return 1;
|
||||
|
@ -2761,7 +2761,7 @@ static int check_func_set(THD *thd, struct st_mysql_sys_var *var,
|
|||
goto err;
|
||||
result= find_set(typelib, str, length, NULL,
|
||||
&error, &error_len, ¬_used);
|
||||
if (error_len)
|
||||
if (unlikely(error_len))
|
||||
goto err;
|
||||
}
|
||||
else
|
||||
|
@ -2880,7 +2880,7 @@ sys_var *find_sys_var_ex(THD *thd, const char *str, size_t length,
|
|||
if (!locked)
|
||||
mysql_mutex_unlock(&LOCK_plugin);
|
||||
|
||||
if (!throw_error && !var)
|
||||
if (unlikely(!throw_error && !var))
|
||||
my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0),
|
||||
(int) (length ? length : strlen(str)), (char*) str);
|
||||
DBUG_RETURN(var);
|
||||
|
@ -4131,7 +4131,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
|
|||
error= handle_options(argc, &argv, opts, mark_changed);
|
||||
(*argc)++; /* add back one for the program name */
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
sql_print_error("Parsing options for plugin '%s' failed.",
|
||||
tmp->name.str);
|
||||
|
|
|
@ -380,16 +380,18 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns)
XXX: fix this nasty upcast from List<Item_param> to List<Item>
*/
error= my_net_write(net, buff, sizeof(buff));
if (stmt->param_count && ! error)
if (stmt->param_count && likely(!error))
{
error= thd->protocol_text.send_result_set_metadata((List<Item> *)
&stmt->lex->param_list,
Protocol::SEND_EOF);
}
if (!error)
if (likely(!error))
{
/* Flag that a response has already been sent */
thd->get_stmt_da()->disable_status();
}
DBUG_RETURN(error);
}
@ -3550,7 +3552,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
|
|||
#else
|
||||
param->set_longdata(thd->extra_data, thd->extra_length);
|
||||
#endif
|
||||
if (thd->get_stmt_da()->is_error())
|
||||
if (unlikely(thd->get_stmt_da()->is_error()))
|
||||
{
|
||||
stmt->state= Query_arena::STMT_ERROR;
|
||||
stmt->last_errno= thd->get_stmt_da()->sql_errno();
|
||||
|
@ -3596,7 +3598,7 @@ bool Select_fetch_protocol_binary::send_eof()
|
|||
Don't send EOF if we're in error condition (which implies we've already
|
||||
sent or are sending an error)
|
||||
*/
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
return true;
|
||||
|
||||
::my_eof(thd);
|
||||
|
@ -3682,7 +3684,7 @@ Execute_sql_statement::execute_server_code(THD *thd)
|
|||
|
||||
error= parse_sql(thd, &parser_state, NULL) || thd->is_error();
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto end;
|
||||
|
||||
thd->lex->set_trg_event_type_for_tables();
|
||||
|
@ -3693,7 +3695,7 @@ Execute_sql_statement::execute_server_code(THD *thd)
|
|||
thd->m_statement_psi= parent_locker;
|
||||
|
||||
/* report error issued during command execution */
|
||||
if (error == 0 && thd->spcont == NULL)
|
||||
if (likely(error == 0) && thd->spcont == NULL)
|
||||
general_log_write(thd, COM_STMT_EXECUTE,
|
||||
thd->query(), thd->query_length());
|
||||
|
||||
|
@ -3945,9 +3947,9 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
|
|||
lex_start(thd);
|
||||
lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_PREPARE;
|
||||
|
||||
error= parse_sql(thd, & parser_state, NULL) ||
|
||||
thd->is_error() ||
|
||||
init_param_array(this);
|
||||
error= (parse_sql(thd, & parser_state, NULL) ||
|
||||
thd->is_error() ||
|
||||
init_param_array(this));
|
||||
|
||||
lex->set_trg_event_type_for_tables();
|
||||
|
||||
|
@ -3979,10 +3981,10 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
|
|||
Item_null objects.
|
||||
*/
|
||||
|
||||
if (error == 0)
|
||||
if (likely(error == 0))
|
||||
error= check_prepared_statement(this);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
/*
|
||||
let the following code know we're not in PS anymore,
|
||||
|
@ -4021,7 +4023,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
|
|||
thd->restore_backup_statement(this, &stmt_backup);
|
||||
thd->stmt_arena= old_stmt_arena;
|
||||
|
||||
if (error == 0)
|
||||
if (likely(error == 0))
|
||||
{
|
||||
setup_set_params();
|
||||
lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_PREPARE;
|
||||
|
@ -4151,7 +4153,7 @@ Prepared_statement::execute_loop(String *expanded_query,
|
|||
DBUG_ASSERT(thd->free_list == NULL);
|
||||
|
||||
/* Check if we got an error when sending long data */
|
||||
if (state == Query_arena::STMT_ERROR)
|
||||
if (unlikely(state == Query_arena::STMT_ERROR))
|
||||
{
|
||||
my_message(last_errno, last_error, MYF(0));
|
||||
return TRUE;
|
||||
|
@ -4215,8 +4217,9 @@ reexecute:
|
|||
}
|
||||
#endif /* WITH_WSREP */
|
||||
|
||||
if ((sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) &&
|
||||
error && !thd->is_fatal_error && !thd->killed &&
|
||||
if (unlikely(error) &&
|
||||
(sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) &&
|
||||
!thd->is_fatal_error && !thd->killed &&
|
||||
reprepare_observer.is_invalidated() &&
|
||||
reprepare_attempt++ < MAX_REPREPARE_ATTEMPTS)
|
||||
{
|
||||
|
@ -4225,7 +4228,7 @@ reexecute:
|
|||
|
||||
error= reprepare();
|
||||
|
||||
if (! error) /* Success */
|
||||
if (likely(!error)) /* Success */
|
||||
goto reexecute;
|
||||
}
|
||||
reset_stmt_params(this);
|
||||
|
@ -4238,7 +4241,7 @@ my_bool bulk_parameters_set(THD *thd)
|
|||
DBUG_ENTER("bulk_parameters_set");
|
||||
Prepared_statement *stmt= (Prepared_statement *) thd->bulk_param;
|
||||
|
||||
if (stmt && stmt->set_bulk_parameters(FALSE))
|
||||
if (stmt && unlikely(stmt->set_bulk_parameters(FALSE)))
|
||||
DBUG_RETURN(TRUE);
|
||||
DBUG_RETURN(FALSE);
|
||||
}
|
||||
|
@ -4410,8 +4413,9 @@ reexecute:
|
|||
}
|
||||
#endif /* WITH_WSREP */
|
||||
|
||||
if ((sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) &&
|
||||
error && !thd->is_fatal_error && !thd->killed &&
|
||||
if (unlikely(error) &&
|
||||
(sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) &&
|
||||
!thd->is_fatal_error && !thd->killed &&
|
||||
reprepare_observer.is_invalidated() &&
|
||||
reprepare_attempt++ < MAX_REPREPARE_ATTEMPTS)
|
||||
{
|
||||
|
@ -4420,7 +4424,7 @@ reexecute:
|
|||
|
||||
error= reprepare();
|
||||
|
||||
if (! error) /* Success */
|
||||
if (likely(!error)) /* Success */
|
||||
goto reexecute;
|
||||
}
|
||||
}
|
||||
|
@ -4495,8 +4499,8 @@ Prepared_statement::reprepare()
|
|||
|
||||
status_var_increment(thd->status_var.com_stmt_reprepare);
|
||||
|
||||
if (mysql_opt_change_db(thd, &stmt_db_name, &saved_cur_db_name, TRUE,
|
||||
&cur_db_changed))
|
||||
if (unlikely(mysql_opt_change_db(thd, &stmt_db_name, &saved_cur_db_name,
|
||||
TRUE, &cur_db_changed)))
|
||||
return TRUE;
|
||||
|
||||
sql_mode_t save_sql_mode= thd->variables.sql_mode;
|
||||
|
@ -4509,7 +4513,7 @@ Prepared_statement::reprepare()
|
|||
if (cur_db_changed)
|
||||
mysql_change_db(thd, (LEX_CSTRING*) &saved_cur_db_name, TRUE);
|
||||
|
||||
if (! error)
|
||||
if (likely(!error))
|
||||
{
|
||||
swap_prepared_statement(©);
|
||||
swap_parameter_array(param_array, copy.param_array, param_count);
|
||||
|
@ -4808,7 +4812,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
|
|||
if (state == Query_arena::STMT_PREPARED && !qc_executed)
|
||||
state= Query_arena::STMT_EXECUTED;
|
||||
|
||||
if (error == 0 && this->lex->sql_command == SQLCOM_CALL)
|
||||
if (likely(error == 0) && this->lex->sql_command == SQLCOM_CALL)
|
||||
{
|
||||
if (is_sql_prepare())
|
||||
{
|
||||
|
@ -4843,7 +4847,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
|
|||
sub-statements inside stored procedures are not logged into
|
||||
the general log.
|
||||
*/
|
||||
if (error == 0 && thd->spcont == NULL)
|
||||
if (likely(error == 0 && thd->spcont == NULL))
|
||||
general_log_write(thd, COM_STMT_EXECUTE, thd->query(), thd->query_length());
|
||||
|
||||
error:
|
||||
|
@ -4871,7 +4875,7 @@ bool Prepared_statement::execute_immediate(const char *query, uint query_len)
|
|||
|
||||
set_sql_prepare();
|
||||
name= execute_immediate_stmt_name; // for DBUG_PRINT etc
|
||||
if (prepare(query, query_len))
|
||||
if (unlikely(prepare(query, query_len)))
|
||||
DBUG_RETURN(true);
|
||||
|
||||
if (param_count != thd->lex->prepared_stmt_params.elements)
|
||||
|
@ -5244,7 +5248,7 @@ Protocol_local::store_string(const char *str, size_t length,
|
|||
src_cs != &my_charset_bin &&
|
||||
dst_cs != &my_charset_bin)
|
||||
{
|
||||
if (convert->copy(str, length, src_cs, dst_cs, &error_unused))
|
||||
if (unlikely(convert->copy(str, length, src_cs, dst_cs, &error_unused)))
|
||||
return TRUE;
|
||||
str= convert->ptr();
|
||||
length= convert->length();
|
||||
|
|
|
@ -74,13 +74,13 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
|
|||
If reload_acl_and_cache() is called from SIGHUP handler we have to
|
||||
allocate temporary THD for execution of acl_reload()/grant_reload().
|
||||
*/
|
||||
if (!thd && (thd= (tmp_thd= new THD(0))))
|
||||
if (unlikely(!thd) && (thd= (tmp_thd= new THD(0))))
|
||||
{
|
||||
thd->thread_stack= (char*) &tmp_thd;
|
||||
thd->store_globals();
|
||||
}
|
||||
|
||||
if (thd)
|
||||
if (likely(thd))
|
||||
{
|
||||
bool reload_acl_failed= acl_reload(thd);
|
||||
bool reload_grants_failed= grant_reload(thd);
|
||||
|
@ -98,7 +98,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
|
|||
}
|
||||
opt_noacl= 0;
|
||||
|
||||
if (tmp_thd)
|
||||
if (unlikely(tmp_thd))
|
||||
{
|
||||
delete tmp_thd;
|
||||
thd= 0;
|
||||
|
@ -123,7 +123,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
|
|||
}
|
||||
|
||||
if (options & REFRESH_ERROR_LOG)
|
||||
if (flush_error_log())
|
||||
if (unlikely(flush_error_log()))
|
||||
{
|
||||
/*
|
||||
When flush_error_log() failed, my_error() has not been called.
|
||||
|
|
|
@ -173,14 +173,14 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
|
|||
error= 1;
|
||||
}
|
||||
|
||||
if (!silent && !error)
|
||||
if (likely(!silent && !error))
|
||||
{
|
||||
binlog_error= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
|
||||
if (!binlog_error)
|
||||
if (likely(!binlog_error))
|
||||
my_ok(thd);
|
||||
}
|
||||
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
query_cache_invalidate3(thd, table_list, 0);
|
||||
|
||||
err:
|
||||
|
@ -332,7 +332,7 @@ do_rename(THD *thd, TABLE_LIST *ren_table, const LEX_CSTRING *new_db,
|
|||
{
|
||||
my_error(ER_NO_SUCH_TABLE, MYF(0), ren_table->db.str, old_alias.str);
|
||||
}
|
||||
if (rc && !skip_error)
|
||||
if (unlikely(rc && !skip_error))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
DBUG_RETURN(0);
|
||||
|
|
|
@ -374,7 +374,7 @@ static int send_file(THD *thd)
|
|||
We need net_flush here because the client will not know it needs to send
|
||||
us the file name until it has processed the load event entry
|
||||
*/
|
||||
if (net_flush(net) || (packet_len = my_net_read(net)) == packet_error)
|
||||
if (unlikely(net_flush(net) || (packet_len = my_net_read(net)) == packet_error))
|
||||
{
|
||||
errmsg = "while reading file name";
|
||||
goto err;
|
||||
|
@ -1259,12 +1259,12 @@ gtid_find_binlog_file(slave_connection_state *state, char *out_name,
|
|||
goto end;
|
||||
}
|
||||
bzero((char*) &cache, sizeof(cache));
|
||||
if ((file= open_binlog(&cache, buf, &errormsg)) == (File)-1)
|
||||
if (unlikely((file= open_binlog(&cache, buf, &errormsg)) == (File)-1))
|
||||
goto end;
|
||||
errormsg= get_gtid_list_event(&cache, &glev);
|
||||
end_io_cache(&cache);
|
||||
mysql_file_close(file, MYF(MY_WME));
|
||||
if (errormsg)
|
||||
if (unlikely(errormsg))
|
||||
goto end;
|
||||
|
||||
if (!glev || contains_all_slave_gtid(state, glev))
|
||||
|
@ -1371,14 +1371,14 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
String packet;
|
||||
Format_description_log_event *fdev= NULL;
|
||||
|
||||
if (gtid_state->load((const rpl_gtid *)NULL, 0))
|
||||
if (unlikely(gtid_state->load((const rpl_gtid *)NULL, 0)))
|
||||
{
|
||||
errormsg= "Internal error (out of memory?) initializing slave state "
|
||||
"while scanning binlog to find start position";
|
||||
return errormsg;
|
||||
}
|
||||
|
||||
if ((file= open_binlog(&cache, name, &errormsg)) == (File)-1)
|
||||
if (unlikely((file= open_binlog(&cache, name, &errormsg)) == (File)-1))
|
||||
return errormsg;
|
||||
|
||||
if (!(fdev= new Format_description_log_event(3)))
|
||||
|
@ -1411,7 +1411,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
err= Log_event::read_log_event(&cache, &packet, fdev,
|
||||
opt_master_verify_checksum ? current_checksum_alg
|
||||
: BINLOG_CHECKSUM_ALG_OFF);
|
||||
if (err)
|
||||
if (unlikely(err))
|
||||
{
|
||||
errormsg= "Could not read binlog while searching for slave start "
|
||||
"position on master";
|
||||
|
@ -1426,7 +1426,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
{
|
||||
Format_description_log_event *tmp;
|
||||
|
||||
if (found_format_description_event)
|
||||
if (unlikely(found_format_description_event))
|
||||
{
|
||||
errormsg= "Duplicate format description log event found while "
|
||||
"searching for old-style position in binlog";
|
||||
|
@ -1435,8 +1435,9 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
|
||||
current_checksum_alg= get_checksum_alg(packet.ptr(), packet.length());
|
||||
found_format_description_event= true;
|
||||
if (!(tmp= new Format_description_log_event(packet.ptr(), packet.length(),
|
||||
fdev)))
|
||||
if (unlikely(!(tmp= new Format_description_log_event(packet.ptr(),
|
||||
packet.length(),
|
||||
fdev))))
|
||||
{
|
||||
errormsg= "Corrupt Format_description event found or out-of-memory "
|
||||
"while searching for old-style position in binlog";
|
||||
|
@ -1459,7 +1460,8 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
goto end;
|
||||
}
|
||||
}
|
||||
else if (typ != FORMAT_DESCRIPTION_EVENT && !found_format_description_event)
|
||||
else if (unlikely(typ != FORMAT_DESCRIPTION_EVENT &&
|
||||
!found_format_description_event))
|
||||
{
|
||||
errormsg= "Did not find format description log event while searching "
|
||||
"for old-style position in binlog";
|
||||
|
@ -1474,7 +1476,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
bool status;
|
||||
uint32 list_len;
|
||||
|
||||
if (found_gtid_list_event)
|
||||
if (unlikely(found_gtid_list_event))
|
||||
{
|
||||
errormsg= "Found duplicate Gtid_list_log_event while scanning binlog "
|
||||
"to find slave start position";
|
||||
|
@ -1483,7 +1485,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
status= Gtid_list_log_event::peek(packet.ptr(), packet.length(),
|
||||
current_checksum_alg,
|
||||
>id_list, &list_len, fdev);
|
||||
if (status)
|
||||
if (unlikely(status))
|
||||
{
|
||||
errormsg= "Error reading Gtid_list_log_event while searching "
|
||||
"for old-style position in binlog";
|
||||
|
@ -1491,7 +1493,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
}
|
||||
err= gtid_state->load(gtid_list, list_len);
|
||||
my_free(gtid_list);
|
||||
if (err)
|
||||
if (unlikely(err))
|
||||
{
|
||||
errormsg= "Internal error (out of memory?) initialising slave state "
|
||||
"while scanning binlog to find start position";
|
||||
|
@ -1499,7 +1501,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
}
|
||||
found_gtid_list_event= true;
|
||||
}
|
||||
else if (!found_gtid_list_event)
|
||||
else if (unlikely(!found_gtid_list_event))
|
||||
{
|
||||
/* We did not find any Gtid_list_log_event, must be old binlog. */
|
||||
goto end;
|
||||
|
@ -1508,15 +1510,16 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
{
|
||||
rpl_gtid gtid;
|
||||
uchar flags2;
|
||||
if (Gtid_log_event::peek(packet.ptr(), packet.length(),
|
||||
current_checksum_alg, >id.domain_id,
|
||||
>id.server_id, >id.seq_no, &flags2, fdev))
|
||||
if (unlikely(Gtid_log_event::peek(packet.ptr(), packet.length(),
|
||||
current_checksum_alg, >id.domain_id,
|
||||
>id.server_id, >id.seq_no, &flags2,
|
||||
fdev)))
|
||||
{
|
||||
errormsg= "Corrupt gtid_log_event found while scanning binlog to find "
|
||||
"initial slave position";
|
||||
goto end;
|
||||
}
|
||||
if (gtid_state->update(>id))
|
||||
if (unlikely(gtid_state->update(>id)))
|
||||
{
|
||||
errormsg= "Internal error (out of memory?) updating slave state while "
|
||||
"scanning binlog to find start position";
|
||||
|
@ -1525,7 +1528,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
|
|||
}
|
||||
}
|
||||
|
||||
if (!valid_pos)
|
||||
if (unlikely(!valid_pos))
|
||||
{
|
||||
errormsg= "Slave requested incorrect position in master binlog. "
|
||||
"Requested position %u in file '%s', but this position does not "
|
||||
|
@ -2099,8 +2102,8 @@ static int init_binlog_sender(binlog_send_info *info,
|
|||
info->error= ER_UNKNOWN_ERROR;
|
||||
return 1;
|
||||
}
|
||||
if ((error= check_slave_start_position(info, &info->errmsg,
|
||||
&info->error_gtid)))
|
||||
if (unlikely((error= check_slave_start_position(info, &info->errmsg,
|
||||
&info->error_gtid))))
|
||||
{
|
||||
info->error= error;
|
||||
return 1;
|
||||
|
@ -2199,7 +2202,7 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log,
|
|||
: BINLOG_CHECKSUM_ALG_OFF);
|
||||
linfo->pos= my_b_tell(log);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
set_read_error(info, error);
|
||||
DBUG_RETURN(1);
|
||||
|
@ -2333,7 +2336,7 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log,
|
|||
: BINLOG_CHECKSUM_ALG_OFF);
|
||||
linfo->pos= my_b_tell(log);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
set_read_error(info, error);
|
||||
DBUG_RETURN(1);
|
||||
|
@ -2585,7 +2588,7 @@ static int send_events(binlog_send_info *info, IO_CACHE* log, LOG_INFO* linfo,
|
|||
: BINLOG_CHECKSUM_ALG_OFF);
|
||||
linfo->pos= my_b_tell(log);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
set_read_error(info, error);
|
||||
return 1;
|
||||
|
@ -2893,6 +2896,12 @@ err:
|
|||
thd->variables.max_allowed_packet= old_max_allowed_packet;
|
||||
delete info->fdev;
|
||||
|
||||
if (likely(info->error == 0))
|
||||
{
|
||||
my_eof(thd);
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
if ((info->error == ER_MASTER_FATAL_ERROR_READING_BINLOG ||
|
||||
info->error == ER_SLAVE_SAME_ID) && binlog_open)
|
||||
{
|
||||
|
@ -2954,17 +2963,10 @@ err:
|
|||
"mysql", rpl_gtid_slave_state_table_name.str);
|
||||
info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG;
|
||||
}
|
||||
else if (info->error != 0 && info->errmsg != NULL)
|
||||
else if (info->errmsg != NULL)
|
||||
strcpy(info->error_text, info->errmsg);
|
||||
|
||||
if (info->error == 0)
|
||||
{
|
||||
my_eof(thd);
|
||||
}
|
||||
else
|
||||
{
|
||||
my_message(info->error, info->error_text, MYF(0));
|
||||
}
|
||||
my_message(info->error, info->error_text, MYF(0));
|
||||
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
@ -3283,9 +3285,9 @@ int reset_slave(THD *thd, Master_info* mi)
|
|||
}
|
||||
|
||||
// delete relay logs, clear relay log coordinates
|
||||
if ((error= purge_relay_logs(&mi->rli, thd,
|
||||
if (unlikely((error= purge_relay_logs(&mi->rli, thd,
|
||||
1 /* just reset */,
|
||||
&errmsg)))
|
||||
&errmsg))))
|
||||
{
|
||||
sql_errno= ER_RELAY_LOG_FAIL;
|
||||
goto err;
|
||||
|
@ -3343,7 +3345,7 @@ int reset_slave(THD *thd, Master_info* mi)
|
|||
repl_semisync_slave.reset_slave(mi);
|
||||
err:
|
||||
mi->unlock_slave_threads();
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
my_error(sql_errno, MYF(0), errmsg);
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
@ -4042,7 +4044,7 @@ bool mysql_show_binlog_events(THD* thd)
|
|||
break;
|
||||
}
|
||||
|
||||
if (event_count < limit_end && log.error)
|
||||
if (unlikely(event_count < limit_end && log.error))
|
||||
{
|
||||
errmsg = "Wrong offset or I/O error";
|
||||
mysql_mutex_unlock(log_lock);
|
||||
|
@ -4223,7 +4225,7 @@ bool show_binlogs(THD* thd)
|
|||
if (protocol->write())
|
||||
goto err;
|
||||
}
|
||||
if(index_file->error == -1)
|
||||
if (unlikely(index_file->error == -1))
|
||||
goto err;
|
||||
mysql_bin_log.unlock_index();
|
||||
my_eof(thd);
|
||||
|
|
File diff suppressed because it is too large
|
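The same annotation pattern repeats in the hunks below: checks that should almost never fire (error paths) are wrapped in unlikely(), checks that are expected to hold (success paths) in likely(). A minimal illustrative sketch of the pattern, assuming the likely()/unlikely() macros sketched earlier are in scope; do_work() and handle_error() are hypothetical helpers, not functions from this patch:

int do_work(void);          /* hypothetical: returns 0 on success */
void handle_error(int err); /* hypothetical error reporter */

static int call_with_hints()
{
  int error;
  if (unlikely((error= do_work())))   /* error branch: predicted not taken */
  {
    handle_error(error);              /* cold code, kept off the hot path */
    return 1;
  }
  return 0;                           /* hot path falls straight through */
}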
@ -341,7 +341,7 @@ bool sequence_insert(THD *thd, LEX *lex, TABLE_LIST *org_table_list)
|
|||
MYSQL_OPEN_HAS_MDL_LOCK);
|
||||
thd->open_options&= ~HA_OPEN_FOR_CREATE;
|
||||
thd->m_reprepare_observer= save_reprepare_observer;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
lex->restore_backup_query_tables_list(&query_tables_list_backup);
|
||||
thd->restore_backup_open_tables_state(&open_tables_backup);
|
||||
|
@ -471,7 +471,7 @@ int SEQUENCE::read_initial_values(TABLE *table)
|
|||
DBUG_RETURN(HA_ERR_LOCK_WAIT_TIMEOUT);
|
||||
}
|
||||
DBUG_ASSERT(table->reginfo.lock_type == TL_READ);
|
||||
if (!(error= read_stored_values(table)))
|
||||
if (likely(!(error= read_stored_values(table))))
|
||||
initialized= SEQ_READY_TO_USE;
|
||||
mysql_unlock_tables(thd, lock);
|
||||
if (mdl_lock_used)
|
||||
|
@ -510,7 +510,7 @@ int SEQUENCE::read_stored_values(TABLE *table)
|
|||
error= table->file->ha_read_first_row(table->record[0], MAX_KEY);
|
||||
tmp_restore_column_map(table->read_set, save_read_set);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
DBUG_RETURN(error);
|
||||
|
@ -591,7 +591,7 @@ int sequence_definition::write_initial_sequence(TABLE *table)
|
|||
table->s->sequence->initialized= SEQUENCE::SEQ_UNINTIALIZED;
|
||||
reenable_binlog(thd);
|
||||
table->write_set= save_write_set;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
table->file->print_error(error, MYF(0));
|
||||
else
|
||||
{
|
||||
|
@ -633,7 +633,7 @@ int sequence_definition::write(TABLE *table, bool all_fields)
|
|||
table->read_set= table->write_set= &table->s->all_set;
|
||||
table->file->column_bitmaps_signal();
|
||||
store_fields(table);
|
||||
if ((error= table->file->ha_write_row(table->record[0])))
|
||||
if (unlikely((error= table->file->ha_write_row(table->record[0]))))
|
||||
table->file->print_error(error, MYF(0));
|
||||
table->rpl_write_set= save_rpl_write_set;
|
||||
table->read_set= save_read_set;
|
||||
|
@ -740,7 +740,7 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error)
|
|||
DBUG_RETURN(next_value(table, 1, error));
|
||||
}
|
||||
|
||||
if ((*error= write(table, 0)))
|
||||
if (unlikely((*error= write(table, 0))))
|
||||
{
|
||||
reserved_until= org_reserved_until;
|
||||
next_free_value= res_value;
|
||||
|
@ -892,7 +892,7 @@ bool Sql_cmd_alter_sequence::execute(THD *thd)
|
|||
trapped_errors= no_such_table_handler.safely_trapped_errors();
|
||||
thd->pop_internal_handler();
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (trapped_errors)
|
||||
{
|
||||
|
@ -948,7 +948,7 @@ bool Sql_cmd_alter_sequence::execute(THD *thd)
|
|||
}
|
||||
|
||||
table->s->sequence->write_lock(table);
|
||||
if (!(error= new_seq->write(table, 1)))
|
||||
if (likely(!(error= new_seq->write(table, 1))))
|
||||
{
|
||||
/* Store the sequence values in table share */
|
||||
table->s->sequence->copy(new_seq);
|
||||
|
@ -960,9 +960,9 @@ bool Sql_cmd_alter_sequence::execute(THD *thd)
|
|||
error= 1;
|
||||
if (trans_commit_implicit(thd))
|
||||
error= 1;
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
error= write_bin_log(thd, 1, thd->query(), thd->query_length());
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
my_ok(thd);
|
||||
|
||||
end:
|
||||
|
|
|
@ -256,7 +256,8 @@ bool servers_reload(THD *thd)
|
|||
|
||||
tables[0].init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_SERVERS_NAME, 0, TL_READ);
|
||||
|
||||
if (open_and_lock_tables(thd, tables, FALSE, MYSQL_LOCK_IGNORE_TIMEOUT))
|
||||
if (unlikely(open_and_lock_tables(thd, tables, FALSE,
|
||||
MYSQL_LOCK_IGNORE_TIMEOUT)))
|
||||
{
|
||||
/*
|
||||
Execution might have been interrupted; only print the error message
|
||||
|
@ -395,11 +396,11 @@ insert_server(THD *thd, FOREIGN_SERVER *server)
|
|||
goto end;
|
||||
|
||||
/* insert the server into the table */
|
||||
if ((error= insert_server_record(table, server)))
|
||||
if (unlikely(error= insert_server_record(table, server)))
|
||||
goto end;
|
||||
|
||||
/* insert the server into the cache */
|
||||
if ((error= insert_server_record_into_cache(server)))
|
||||
if (unlikely((error= insert_server_record_into_cache(server))))
|
||||
goto end;
|
||||
|
||||
end:
|
||||
|
@ -542,10 +543,12 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server)
|
|||
system_charset_info);
|
||||
|
||||
/* read index until record is that specified in server_name */
|
||||
if ((error= table->file->ha_index_read_idx_map(table->record[0], 0,
|
||||
(uchar *)table->field[0]->ptr,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT)))
|
||||
if (unlikely((error=
|
||||
table->file->ha_index_read_idx_map(table->record[0], 0,
|
||||
(uchar *)table->field[0]->
|
||||
ptr,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT))))
|
||||
{
|
||||
/* if not found, err */
|
||||
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
|
||||
|
@ -559,12 +562,8 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server)
|
|||
DBUG_PRINT("info",("record for server '%s' not found!",
|
||||
server->server_name));
|
||||
/* write/insert the new server */
|
||||
if ((error=table->file->ha_write_row(table->record[0])))
|
||||
{
|
||||
if (unlikely(error=table->file->ha_write_row(table->record[0])))
|
||||
table->file->print_error(error, MYF(0));
|
||||
}
|
||||
else
|
||||
error= 0;
|
||||
}
|
||||
else
|
||||
error= ER_FOREIGN_SERVER_EXISTS;
|
||||
|
@ -608,10 +607,11 @@ static int drop_server_internal(THD *thd, LEX_SERVER_OPTIONS *server_options)
|
|||
tables.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_SERVERS_NAME, 0, TL_WRITE);
|
||||
|
||||
/* hit the memory hit first */
|
||||
if ((error= delete_server_record_in_cache(server_options)))
|
||||
if (unlikely((error= delete_server_record_in_cache(server_options))))
|
||||
goto end;
|
||||
|
||||
if (! (table= open_ltable(thd, &tables, TL_WRITE, MYSQL_LOCK_IGNORE_TIMEOUT)))
|
||||
if (unlikely(!(table= open_ltable(thd, &tables, TL_WRITE,
|
||||
MYSQL_LOCK_IGNORE_TIMEOUT))))
|
||||
{
|
||||
error= my_errno;
|
||||
goto end;
|
||||
|
@ -744,7 +744,7 @@ int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered)
|
|||
goto end;
|
||||
}
|
||||
|
||||
if ((error= update_server_record(table, altered)))
|
||||
if (unlikely((error= update_server_record(table, altered))))
|
||||
goto end;
|
||||
|
||||
error= update_server_record_in_cache(existing, altered);
|
||||
|
@ -892,10 +892,12 @@ update_server_record(TABLE *table, FOREIGN_SERVER *server)
|
|||
server->server_name_length,
|
||||
system_charset_info);
|
||||
|
||||
if ((error= table->file->ha_index_read_idx_map(table->record[0], 0,
|
||||
(uchar *)table->field[0]->ptr,
|
||||
~(longlong)0,
|
||||
HA_READ_KEY_EXACT)))
|
||||
if (unlikely((error=
|
||||
table->file->ha_index_read_idx_map(table->record[0], 0,
|
||||
(uchar *)table->field[0]->
|
||||
ptr,
|
||||
~(longlong)0,
|
||||
HA_READ_KEY_EXACT))))
|
||||
{
|
||||
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
@ -907,9 +909,9 @@ update_server_record(TABLE *table, FOREIGN_SERVER *server)
|
|||
/* ok, so we can update since the record exists in the table */
|
||||
store_record(table,record[1]);
|
||||
store_server_fields(table, server);
|
||||
if ((error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely((error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME))
|
||||
{
|
||||
DBUG_PRINT("info",("problems with ha_update_row %d", error));
|
||||
goto end;
|
||||
|
@ -950,10 +952,12 @@ delete_server_record(TABLE *table, LEX_CSTRING *name)
|
|||
/* set the field that's the PK to the value we're looking for */
|
||||
table->field[0]->store(name->str, name->length, system_charset_info);
|
||||
|
||||
if ((error= table->file->ha_index_read_idx_map(table->record[0], 0,
|
||||
(uchar *)table->field[0]->ptr,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT)))
|
||||
if (unlikely((error=
|
||||
table->file->ha_index_read_idx_map(table->record[0], 0,
|
||||
(uchar *)table->field[0]->
|
||||
ptr,
|
||||
HA_WHOLE_KEY,
|
||||
HA_READ_KEY_EXACT))))
|
||||
{
|
||||
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
@ -962,7 +966,7 @@ delete_server_record(TABLE *table, LEX_CSTRING *name)
|
|||
}
|
||||
else
|
||||
{
|
||||
if ((error= table->file->ha_delete_row(table->record[0])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[0]))))
|
||||
table->file->print_error(error, MYF(0));
|
||||
}
|
||||
|
||||
|
@ -1001,7 +1005,7 @@ int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
|
|||
{
|
||||
if (thd->lex->create_info.or_replace())
|
||||
{
|
||||
if ((error= drop_server_internal(thd, server_options)))
|
||||
if (unlikely((error= drop_server_internal(thd, server_options))))
|
||||
goto end;
|
||||
}
|
||||
else if (thd->lex->create_info.if_not_exists())
|
||||
|
@ -1032,7 +1036,7 @@ int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
|
|||
end:
|
||||
mysql_rwlock_unlock(&THR_LOCK_servers);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
DBUG_PRINT("info", ("problem creating server <%s>",
|
||||
server_options->server_name.str));
|
||||
|
|
|
@ -1261,7 +1261,7 @@ mysqld_show_create_get_fields(THD *thd, TABLE_LIST *table_list,
|
|||
MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL) ||
|
||||
mysql_handle_derived(thd->lex, DT_INIT | DT_PREPARE);
|
||||
thd->pop_internal_handler();
|
||||
if (open_error && (thd->killed || thd->is_error()))
|
||||
if (unlikely(open_error && (thd->killed || thd->is_error())))
|
||||
goto exit;
|
||||
}
|
||||
|
||||
|
@ -3815,17 +3815,19 @@ bool schema_table_store_record(THD *thd, TABLE *table)
|
|||
{
|
||||
int error;
|
||||
|
||||
if (thd->killed)
|
||||
if (unlikely(thd->killed))
|
||||
{
|
||||
thd->send_kill_message();
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ((error= table->file->ha_write_tmp_row(table->record[0])))
|
||||
if (unlikely((error= table->file->ha_write_tmp_row(table->record[0]))))
|
||||
{
|
||||
TMP_TABLE_PARAM *param= table->pos_in_table_list->schema_table_param;
|
||||
if (create_internal_tmp_table_from_heap(thd, table, param->start_recinfo,
|
||||
¶m->recinfo, error, 0, NULL))
|
||||
if (unlikely(create_internal_tmp_table_from_heap(thd, table,
|
||||
param->start_recinfo,
|
||||
¶m->recinfo, error, 0,
|
||||
NULL)))
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -4590,7 +4592,7 @@ fill_schema_table_by_open(THD *thd, MEM_ROOT *mem_root,
|
|||
else
|
||||
{
|
||||
char buf[NAME_CHAR_LEN + 1];
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
get_table_engine_for_i_s(thd, buf, table_list, &db_name, &table_name);
|
||||
|
||||
result= schema_table->process_table(thd, table_list,
|
||||
|
@ -4672,13 +4674,14 @@ static int fill_schema_table_names(THD *thd, TABLE_LIST *tables,
|
|||
else
|
||||
table->field[3]->store(STRING_WITH_LEN("ERROR"), cs);
|
||||
|
||||
if (thd->is_error() && thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE)
|
||||
if (unlikely(thd->is_error() &&
|
||||
thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE))
|
||||
{
|
||||
thd->clear_error();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (schema_table_store_record(thd, table))
|
||||
if (unlikely(schema_table_store_record(thd, table)))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -5004,7 +5007,7 @@ public:
|
|||
if (*level != Sql_condition::WARN_LEVEL_ERROR)
|
||||
return false;
|
||||
|
||||
if (!thd->get_stmt_da()->is_error())
|
||||
if (likely(!thd->get_stmt_da()->is_error()))
|
||||
thd->get_stmt_da()->set_error_status(sql_errno, msg, sqlstate, *cond_hdl);
|
||||
return true; // handled!
|
||||
}
|
||||
|
@ -5129,9 +5132,9 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
|
|||
Dynamic_array<LEX_CSTRING*> table_names;
|
||||
int res= make_table_name_list(thd, &table_names, lex,
|
||||
&plan->lookup_field_vals, db_name);
|
||||
if (res == 2) /* Not fatal error, continue */
|
||||
if (unlikely(res == 2)) /* Not fatal error, continue */
|
||||
continue;
|
||||
if (res)
|
||||
if (unlikely(res))
|
||||
goto err;
|
||||
|
||||
for (size_t i=0; i < table_names.elements(); i++)
|
||||
|
@ -5490,13 +5493,13 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
|
|||
|
||||
/* Collect table info from the storage engine */
|
||||
|
||||
if(file)
|
||||
if (file)
|
||||
{
|
||||
/* If info() fails, then there's nothing else to do */
|
||||
if ((info_error= file->info(HA_STATUS_VARIABLE |
|
||||
HA_STATUS_TIME |
|
||||
HA_STATUS_VARIABLE_EXTRA |
|
||||
HA_STATUS_AUTO)) != 0)
|
||||
if (unlikely((info_error= file->info(HA_STATUS_VARIABLE |
|
||||
HA_STATUS_TIME |
|
||||
HA_STATUS_VARIABLE_EXTRA |
|
||||
HA_STATUS_AUTO)) != 0))
|
||||
{
|
||||
file->print_error(info_error, MYF(0));
|
||||
goto err;
|
||||
|
@ -5595,7 +5598,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
|
|||
}
|
||||
|
||||
err:
|
||||
if (res || info_error)
|
||||
if (unlikely(res || info_error))
|
||||
{
|
||||
/*
|
||||
If an error was encountered, push a warning, set the TABLE COMMENT
|
||||
|
@ -6527,7 +6530,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
|
|||
I.e. we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS
|
||||
rather than in SHOW KEYS
|
||||
*/
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
thd->get_stmt_da()->sql_errno(),
|
||||
thd->get_stmt_da()->message());
|
||||
|
@ -6755,7 +6758,7 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
|
|||
|
||||
if (schema_table_store_record(thd, table))
|
||||
DBUG_RETURN(1);
|
||||
if (res && thd->is_error())
|
||||
if (unlikely(res && thd->is_error()))
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
thd->get_stmt_da()->sql_errno(),
|
||||
thd->get_stmt_da()->message());
|
||||
|
@ -6791,7 +6794,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
|
|||
DBUG_ENTER("get_schema_constraints_record");
|
||||
if (res)
|
||||
{
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
thd->get_stmt_da()->sql_errno(),
|
||||
thd->get_stmt_da()->message());
|
||||
|
@ -6925,7 +6928,7 @@ static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables,
|
|||
*/
|
||||
if (res)
|
||||
{
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
thd->get_stmt_da()->sql_errno(),
|
||||
thd->get_stmt_da()->message());
|
||||
|
@ -6989,7 +6992,7 @@ static int get_schema_key_column_usage_record(THD *thd,
|
|||
DBUG_ENTER("get_schema_key_column_usage_record");
|
||||
if (res)
|
||||
{
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
thd->get_stmt_da()->sql_errno(),
|
||||
thd->get_stmt_da()->message());
|
||||
|
@ -7276,7 +7279,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
|
|||
|
||||
if (res)
|
||||
{
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
thd->get_stmt_da()->sql_errno(),
|
||||
thd->get_stmt_da()->message());
|
||||
|
@ -7688,8 +7691,9 @@ int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond)
|
|||
TABLE *table= tables->table;
|
||||
CHARSET_INFO *cs= system_charset_info;
|
||||
OPEN_TABLE_LIST *open_list;
|
||||
if (!(open_list=list_open_tables(thd,thd->lex->select_lex.db.str, wild))
|
||||
&& thd->is_fatal_error)
|
||||
if (unlikely(!(open_list= list_open_tables(thd, thd->lex->select_lex.db.str,
|
||||
wild))) &&
|
||||
unlikely(thd->is_fatal_error))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
for (; open_list ; open_list=open_list->next)
|
||||
|
@ -7699,7 +7703,7 @@ int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond)
|
|||
table->field[1]->store(open_list->table, strlen(open_list->table), cs);
|
||||
table->field[2]->store((longlong) open_list->in_use, TRUE);
|
||||
table->field[3]->store((longlong) open_list->locked, TRUE);
|
||||
if (schema_table_store_record(thd, table))
|
||||
if (unlikely(schema_table_store_record(thd, table)))
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
DBUG_RETURN(0);
|
||||
|
@ -7828,7 +7832,7 @@ get_referential_constraints_record(THD *thd, TABLE_LIST *tables,
|
|||
|
||||
if (res)
|
||||
{
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
|
||||
thd->get_stmt_da()->sql_errno(),
|
||||
thd->get_stmt_da()->message());
|
||||
|
@ -8774,7 +8778,7 @@ bool get_schema_tables_result(JOIN *join,
|
|||
}
|
||||
}
|
||||
thd->pop_internal_handler();
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
/*
|
||||
This hack is here, because I_S code uses thd->clear_error() a lot.
|
||||
|
@ -10273,14 +10277,14 @@ static void get_cs_converted_string_value(THD *thd,
|
|||
|
||||
try_val.copy(input_str->ptr(), input_str->length(), cs,
|
||||
thd->variables.character_set_client, &try_conv_error);
|
||||
if (!try_conv_error)
|
||||
if (likely(!try_conv_error))
|
||||
{
|
||||
String val;
|
||||
uint conv_error= 0;
|
||||
|
||||
val.copy(input_str->ptr(), input_str->length(), cs,
|
||||
system_charset_info, &conv_error);
|
||||
if (!conv_error)
|
||||
if (likely(!conv_error))
|
||||
{
|
||||
append_unescaped(output_str, val.ptr(), val.length());
|
||||
return;
|
||||
|
|
|
@ -319,7 +319,7 @@ int Sql_cmd_common_signal::eval_signal_informations(THD *thd, Sql_condition *con
|
|||
The various item->val_xxx() methods don't return an error code,
|
||||
but flag thd in case of failure.
|
||||
*/
|
||||
if (! thd->is_error())
|
||||
if (likely(!thd->is_error()))
|
||||
result= 0;
|
||||
|
||||
end:
|
||||
|
|
|
@ -402,9 +402,10 @@ public:
|
|||
if (ALIGN_SIZE(arg_length+1) < Alloced_length)
|
||||
{
|
||||
char *new_ptr;
|
||||
if (!(new_ptr=(char*)
|
||||
my_realloc(Ptr, arg_length,MYF((thread_specific ?
|
||||
MY_THREAD_SPECIFIC : 0)))))
|
||||
if (unlikely(!(new_ptr=(char*)
|
||||
my_realloc(Ptr,
|
||||
arg_length,MYF((thread_specific ?
|
||||
MY_THREAD_SPECIFIC : 0))))))
|
||||
{
|
||||
Alloced_length = 0;
|
||||
real_alloc(arg_length);
|
||||
|
@ -455,7 +456,7 @@ public:
|
|||
CHARSET_INFO *fromcs, const char *src, size_t src_length,
|
||||
size_t nchars, String_copier *copier)
|
||||
{
|
||||
if (alloc(tocs->mbmaxlen * src_length))
|
||||
if (unlikely(alloc(tocs->mbmaxlen * src_length)))
|
||||
return true;
|
||||
str_length= copier->well_formed_copy(tocs, Ptr, Alloced_length,
|
||||
fromcs, src, (uint)src_length, (uint)nchars);
|
||||
|
@ -511,7 +512,7 @@ public:
|
|||
}
|
||||
else
|
||||
{
|
||||
if (realloc_with_extra(str_length + 1))
|
||||
if (unlikely(realloc_with_extra(str_length + 1)))
|
||||
return 1;
|
||||
Ptr[str_length++]=chr;
|
||||
}
|
||||
|
@ -521,8 +522,8 @@ public:
|
|||
{
|
||||
for (const char *src_end= src + srclen ; src != src_end ; src++)
|
||||
{
|
||||
if (append(_dig_vec_lower[((uchar) *src) >> 4]) ||
|
||||
append(_dig_vec_lower[((uchar) *src) & 0x0F]))
|
||||
if (unlikely(append(_dig_vec_lower[((uchar) *src) >> 4])) ||
|
||||
unlikely(append(_dig_vec_lower[((uchar) *src) & 0x0F])))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -638,7 +639,7 @@ public:
|
|||
uint32 new_length= arg_length + str_length;
|
||||
if (new_length > Alloced_length)
|
||||
{
|
||||
if (realloc(new_length + step_alloc))
|
||||
if (unlikely(realloc(new_length + step_alloc)))
|
||||
return 0;
|
||||
}
|
||||
uint32 old_length= str_length;
|
||||
|
@ -650,7 +651,8 @@ public:
|
|||
inline bool append(const char *s, uint32 arg_length, uint32 step_alloc)
|
||||
{
|
||||
uint32 new_length= arg_length + str_length;
|
||||
if (new_length > Alloced_length && realloc(new_length + step_alloc))
|
||||
if (new_length > Alloced_length &&
|
||||
unlikely(realloc(new_length + step_alloc)))
|
||||
return TRUE;
|
||||
memcpy(Ptr+str_length, s, arg_length);
|
||||
str_length+= arg_length;
|
||||
|
|
sql/sql_table.cc
|
@ -115,7 +115,7 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
res= strconvert(&my_charset_filename, conv_name, name_len,
system_charset_info,
conv_string, FN_REFLEN, &errors);
if (!res || errors)
if (unlikely(!res || errors))
{
DBUG_PRINT("error", ("strconvert of '%s' failed with %u (errors: %u)", conv_name, res, errors));
conv_name= name;
@ -128,7 +128,9 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
conv_name_end= conv_string + res;
}
quote = thd ? get_quote_char_for_identifier(thd, conv_name, res - 1) : '`';
quote= (likely(thd) ?
get_quote_char_for_identifier(thd, conv_name, res - 1) :
'`');
if (quote != EOF && (end_p - to_p > 2))
{
@ -390,7 +392,7 @@ uint filename_to_tablename(const char *from, char *to, size_t to_length,
|
|||
|
||||
res= strconvert(&my_charset_filename, from, FN_REFLEN,
|
||||
system_charset_info, to, to_length, &errors);
|
||||
if (errors) // Old 5.0 name
|
||||
if (unlikely(errors)) // Old 5.0 name
|
||||
{
|
||||
res= (strxnmov(to, to_length, MYSQL50_TABLE_NAME_PREFIX, from, NullS) -
|
||||
to);
|
||||
|
@ -1160,7 +1162,8 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
|
|||
if (frm_action)
|
||||
{
|
||||
strxmov(to_path, ddl_log_entry->name, reg_ext, NullS);
|
||||
if ((error= mysql_file_delete(key_file_frm, to_path, MYF(MY_WME))))
|
||||
if (unlikely((error= mysql_file_delete(key_file_frm, to_path,
|
||||
MYF(MY_WME)))))
|
||||
{
|
||||
if (my_errno != ENOENT)
|
||||
break;
|
||||
|
@ -1172,7 +1175,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
|
|||
}
|
||||
else
|
||||
{
|
||||
if ((error= file->ha_delete_table(ddl_log_entry->name)))
|
||||
if (unlikely((error= file->ha_delete_table(ddl_log_entry->name))))
|
||||
{
|
||||
if (error != ENOENT && error != HA_ERR_NO_SUCH_TABLE)
|
||||
break;
|
||||
|
@ -1425,19 +1428,19 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
|
|||
+ (2*FN_REFLEN)],
|
||||
(char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
|
||||
+ (3*FN_REFLEN)]));
|
||||
if (write_ddl_log_file_entry((*active_entry)->entry_pos))
|
||||
if (unlikely(write_ddl_log_file_entry((*active_entry)->entry_pos)))
|
||||
{
|
||||
error= TRUE;
|
||||
sql_print_error("Failed to write entry_no = %u",
|
||||
(*active_entry)->entry_pos);
|
||||
}
|
||||
if (write_header && !error)
|
||||
if (write_header && likely(!error))
|
||||
{
|
||||
(void) sync_ddl_log_no_lock();
|
||||
if (write_ddl_log_header())
|
||||
error= TRUE;
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
release_ddl_log_memory_entry(*active_entry);
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
@ -1868,8 +1871,10 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
|
|||
lpt->create_info->tmp_table(), frm.str, frm.length);
|
||||
my_free(const_cast<uchar*>(frm.str));
|
||||
|
||||
if (error || lpt->table->file->ha_create_partitioning_metadata(shadow_path,
|
||||
NULL, CHF_CREATE_FLAG))
|
||||
if (unlikely(error) ||
|
||||
unlikely(lpt->table->file->
|
||||
ha_create_partitioning_metadata(shadow_path,
|
||||
NULL, CHF_CREATE_FLAG)))
|
||||
{
|
||||
mysql_file_delete(key_file_frm, shadow_frm_name, MYF(0));
|
||||
error= 1;
|
||||
|
@ -2127,7 +2132,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists,
|
|||
false, drop_sequence, false, false);
|
||||
thd->pop_internal_handler();
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(TRUE);
|
||||
my_ok(thd);
|
||||
DBUG_RETURN(FALSE);
|
||||
|
@ -2524,26 +2529,26 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
*/
(void) mysql_file_delete(key_file_frm, path, MYF(0));
}
else if (mysql_file_delete(key_file_frm, path,
MYF(MY_WME)))
else if (unlikely(mysql_file_delete(key_file_frm, path,
MYF(MY_WME))))
{
frm_delete_error= my_errno;
DBUG_ASSERT(frm_delete_error);
}
}

if (!error)
if (likely(!error))
{
int trigger_drop_error= 0;

if (!frm_delete_error)
if (likely(!frm_delete_error))
{
non_tmp_table_deleted= TRUE;
trigger_drop_error=
Table_triggers_list::drop_all_triggers(thd, &db, &table->table_name);
}

if (trigger_drop_error ||
if (unlikely(trigger_drop_error) ||
(frm_delete_error && frm_delete_error != ENOENT))
error= 1;
else if (frm_delete_error && if_exists)
@ -4809,10 +4814,14 @@ int create_table_impl(THD *thd,
create_info->data_file_name= create_info->index_file_name= 0;
}
else
if (error_if_data_home_dir(create_info->data_file_name, "DATA DIRECTORY") ||
error_if_data_home_dir(create_info->index_file_name, "INDEX DIRECTORY")||
check_partition_dirs(thd->lex->part_info))
goto err;
{
if (unlikely(error_if_data_home_dir(create_info->data_file_name,
"DATA DIRECTORY")) ||
unlikely(error_if_data_home_dir(create_info->index_file_name,
"INDEX DIRECTORY")) ||
unlikely(check_partition_dirs(thd->lex->part_info)))
goto err;
}

alias= const_cast<LEX_CSTRING*>(table_case_name(create_info, table_name));
@ -5195,9 +5204,10 @@ err:
thd->transaction.stmt.mark_created_temp_table();

/* Write log if no error or if we already deleted a table */
if (!result || thd->log_current_statement)
if (likely(!result) || thd->log_current_statement)
{
if (result && create_info->table_was_deleted && pos_in_locked_tables)
if (unlikely(result) && create_info->table_was_deleted &&
pos_in_locked_tables)
{
/*
Possible locked table was dropped. We should remove meta data locks

@ -5205,7 +5215,7 @@ err:
*/
thd->locked_tables_list.unlock_locked_table(thd, mdl_ticket);
}
else if (!result && create_info->tmp_table() && create_info->table)
else if (likely(!result) && create_info->tmp_table() && create_info->table)
{
/*
Remember that tmp table creation was logged so that we know if

@ -5213,8 +5223,8 @@ err:
*/
create_info->table->s->table_creation_was_logged= 1;
}
if (write_bin_log(thd, result ? FALSE : TRUE, thd->query(),
thd->query_length(), is_trans))
if (unlikely(write_bin_log(thd, result ? FALSE : TRUE, thd->query(),
thd->query_length(), is_trans)))
result= 1;
}
DBUG_RETURN(result);
@ -5439,9 +5449,9 @@ mysql_rename_table(handlerton *base, const LEX_CSTRING *old_db,
error= my_errno;
(void) file->ha_create_partitioning_metadata(to, from, CHF_RENAME_FLAG);
}
else if (!file || !(error=file->ha_rename_table(from_base, to_base)))
else if (!file || likely(!(error=file->ha_rename_table(from_base, to_base))))
{
if (!(flags & NO_FRM_RENAME) && rename_file_ext(from,to,reg_ext))
if (!(flags & NO_FRM_RENAME) && unlikely(rename_file_ext(from,to,reg_ext)))
{
error=my_errno;
if (file)

@ -5454,10 +5464,14 @@ mysql_rename_table(handlerton *base, const LEX_CSTRING *old_db,
}
}
delete file;
if (error == HA_ERR_WRONG_COMMAND)
my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE");
else if (error)
my_error(ER_ERROR_ON_RENAME, MYF(0), from, to, error);

if (unlikely(error))
{
if (error == HA_ERR_WRONG_COMMAND)
my_error(ER_NOT_SUPPORTED_YET, MYF(0), "ALTER TABLE");
else
my_error(ER_ERROR_ON_RENAME, MYF(0), from, to, error);
}
else if (!(flags & FN_IS_TMP))
mysql_audit_rename_table(thd, old_db, old_name, new_db, new_name);
@ -5859,7 +5873,7 @@ int mysql_discard_or_import_tablespace(THD *thd,

THD_STAGE_INFO(thd, stage_end);

if (error)
if (unlikely(error))
goto err;

/*

@ -5870,15 +5884,15 @@ int mysql_discard_or_import_tablespace(THD *thd,

/* The ALTER TABLE is always in its own transaction */
error= trans_commit_stmt(thd);
if (trans_commit_implicit(thd))
if (unlikely(trans_commit_implicit(thd)))
error=1;
if (!error)
if (likely(!error))
error= write_bin_log(thd, FALSE, thd->query(), thd->query_length());

err:
thd->tablespace_op=FALSE;

if (error == 0)
if (likely(error == 0))
{
my_ok(thd);
DBUG_RETURN(0);
@ -6007,7 +6021,7 @@ drop_create_field:
break;
}
}
if (*f_ptr == NULL)
if (unlikely(*f_ptr == NULL))
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_BAD_FIELD_ERROR,

@ -6043,7 +6057,7 @@ drop_create_field:
acol->name, (*f_ptr)->field_name.str) == 0)
break;
}
if (*f_ptr == NULL)
if (unlikely(*f_ptr == NULL))
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_BAD_FIELD_ERROR,
@ -7216,18 +7230,20 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
error= table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
}

if (error == HA_ERR_WRONG_COMMAND)
if (unlikely(error))
{
THD *thd= table->in_use;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_ILLEGAL_HA, ER_THD(thd, ER_ILLEGAL_HA),
table->file->table_type(),
table->s->db.str, table->s->table_name.str);
error= 0;
if (error == HA_ERR_WRONG_COMMAND)
{
THD *thd= table->in_use;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_ILLEGAL_HA, ER_THD(thd, ER_ILLEGAL_HA),
table->file->table_type(),
table->s->db.str, table->s->table_name.str);
error= 0;
}
else
table->file->print_error(error, MYF(0));
}
else if (error)
table->file->print_error(error, MYF(0));

DBUG_RETURN(error);
}
@ -8007,7 +8023,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
|
|||
break;
|
||||
}
|
||||
|
||||
if (find && !find->field)
|
||||
if (likely(find && !find->field))
|
||||
find_it.remove();
|
||||
else
|
||||
{
|
||||
|
@ -8078,7 +8094,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
|
|||
&find->field_name))
|
||||
break;
|
||||
}
|
||||
if (!find)
|
||||
if (unlikely(!find))
|
||||
{
|
||||
my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after.str,
|
||||
table->s->table_name.str);
|
||||
|
@ -8112,13 +8128,13 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
|
|||
alter_it.remove();
|
||||
}
|
||||
}
|
||||
if (alter_info->alter_list.elements)
|
||||
if (unlikely(alter_info->alter_list.elements))
|
||||
{
|
||||
my_error(ER_BAD_FIELD_ERROR, MYF(0),
|
||||
alter_info->alter_list.head()->name, table->s->table_name.str);
|
||||
goto err;
|
||||
}
|
||||
if (!new_create_list.elements)
|
||||
if (unlikely(!new_create_list.elements))
|
||||
{
|
||||
my_message(ER_CANT_REMOVE_ALL_FIELDS,
|
||||
ER_THD(thd, ER_CANT_REMOVE_ALL_FIELDS),
|
||||
|
@ -8594,7 +8610,7 @@ static bool fk_prepare_copy_alter_table(THD *thd, TABLE *table,
|
|||
table->file->get_parent_foreign_key_list(thd, &fk_parent_key_list);
|
||||
|
||||
/* OOM when building list. */
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
DBUG_RETURN(true);
|
||||
|
||||
/*
|
||||
|
@ -8689,7 +8705,7 @@ static bool fk_prepare_copy_alter_table(THD *thd, TABLE *table,
|
|||
table->file->get_foreign_key_list(thd, &fk_child_key_list);
|
||||
|
||||
/* OOM when building list. */
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
DBUG_RETURN(true);
|
||||
|
||||
/*
|
||||
|
@ -8783,7 +8799,7 @@ simple_tmp_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
|
|||
keys_onoff);
|
||||
}
|
||||
|
||||
if (!error && alter_ctx->is_table_renamed())
|
||||
if (likely(!error) && alter_ctx->is_table_renamed())
|
||||
{
|
||||
THD_STAGE_INFO(thd, stage_rename);
|
||||
|
||||
|
@ -8796,20 +8812,17 @@ simple_tmp_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
|
|||
&alter_ctx->new_alias);
|
||||
}
|
||||
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
int res= 0;
|
||||
/*
|
||||
We do not replicate alter table statement on temporary tables under
|
||||
ROW-based replication.
|
||||
*/
|
||||
if (!thd->is_current_stmt_binlog_format_row())
|
||||
{
|
||||
res= write_bin_log(thd, true, thd->query(), thd->query_length());
|
||||
error= write_bin_log(thd, true, thd->query(), thd->query_length()) != 0;
|
||||
}
|
||||
if (res != 0)
|
||||
error= true;
|
||||
else
|
||||
if (likely(!error))
|
||||
my_ok(thd);
|
||||
}
|
||||
|
||||
|
@ -8858,7 +8871,7 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
|
|||
keys_onoff);
|
||||
}
|
||||
|
||||
if (!error && alter_ctx->is_table_renamed())
|
||||
if (likely(!error) && alter_ctx->is_table_renamed())
|
||||
{
|
||||
THD_STAGE_INFO(thd, stage_rename);
|
||||
handlerton *old_db_type= table->s->db_type();
|
||||
|
@ -8898,11 +8911,11 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
|
|||
}
|
||||
}
|
||||
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
{
|
||||
error= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
|
||||
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
my_ok(thd);
|
||||
}
|
||||
table_list->table= NULL; // For query cache
|
||||
|
@ -8963,7 +8976,8 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
|
|||
based on information about the table changes from fill_alter_inplace_info().
|
||||
*/
|
||||
|
||||
bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *new_name,
|
||||
bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
|
||||
const LEX_CSTRING *new_name,
|
||||
HA_CREATE_INFO *create_info,
|
||||
TABLE_LIST *table_list,
|
||||
Alter_info *alter_info,
|
||||
|
@ -9065,7 +9079,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *n
|
|||
};);
|
||||
#endif // WITH_WSREP
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(true);
|
||||
|
||||
table->use_all_columns();
|
||||
|
@ -9511,7 +9525,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, const LEX_CSTRING *n
|
|||
&key_info, &key_count, &frm);
|
||||
reenable_binlog(thd);
|
||||
thd->abort_on_warning= false;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
my_free(const_cast<uchar*>(frm.str));
|
||||
DBUG_RETURN(true);
|
||||
|
@ -10018,8 +10032,8 @@ err_new_table_cleanup:
|
|||
the table to be altered isn't empty.
|
||||
Report error here.
|
||||
*/
|
||||
if (alter_ctx.error_if_not_empty &&
|
||||
thd->get_stmt_da()->current_row_for_warning())
|
||||
if (unlikely(alter_ctx.error_if_not_empty &&
|
||||
thd->get_stmt_da()->current_row_for_warning()))
|
||||
{
|
||||
const char *f_val= 0;
|
||||
enum enum_mysql_timestamp_type t_type= MYSQL_TIMESTAMP_DATE;
|
||||
|
@ -10285,15 +10299,15 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
|
|||
if (!ignore) /* for now, InnoDB needs the undo log for ALTER IGNORE */
|
||||
to->file->extra(HA_EXTRA_BEGIN_ALTER_COPY);
|
||||
|
||||
while (!(error= info.read_record()))
|
||||
while (likely(!(error= info.read_record())))
|
||||
{
|
||||
if (thd->killed)
|
||||
if (unlikely(thd->killed))
|
||||
{
|
||||
thd->send_kill_message();
|
||||
error= 1;
|
||||
break;
|
||||
}
|
||||
if (++thd->progress.counter >= time_to_report_progress)
|
||||
if (unlikely(++thd->progress.counter >= time_to_report_progress))
|
||||
{
|
||||
time_to_report_progress+= MY_HOW_OFTEN_TO_WRITE/10;
|
||||
thd_progress_report(thd, thd->progress.counter,
|
||||
|
@ -10301,7 +10315,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
|
|||
}
|
||||
|
||||
/* Return error if source table isn't empty. */
|
||||
if (alter_ctx->error_if_not_empty)
|
||||
if (unlikely(alter_ctx->error_if_not_empty))
|
||||
{
|
||||
error= 1;
|
||||
break;
|
||||
|
@ -10343,7 +10357,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
|
|||
/* This will set thd->is_error() if fatal failure */
|
||||
if (to->verify_constraints(ignore) == VIEW_CHECK_SKIP)
|
||||
continue;
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
error= 1;
|
||||
break;
|
||||
|
@ -10353,7 +10367,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
|
|||
|
||||
error= to->file->ha_write_row(to->record[0]);
|
||||
to->auto_increment_field_not_null= FALSE;
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (to->file->is_fatal_error(error, HA_CHECK_DUP))
|
||||
{
|
||||
|
@ -10365,7 +10379,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
|
|||
else
|
||||
{
|
||||
/* Duplicate key error. */
|
||||
if (alter_ctx->fk_error_if_delete_row)
|
||||
if (unlikely(alter_ctx->fk_error_if_delete_row))
|
||||
{
|
||||
/*
|
||||
We are trying to omit a row from the table which serves as parent
|
||||
|
@ -10421,7 +10435,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
|
|||
/* We are going to drop the temporary table */
|
||||
to->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
|
||||
}
|
||||
if (to->file->ha_end_bulk_insert() && error <= 0)
|
||||
if (unlikely(to->file->ha_end_bulk_insert()) && error <= 0)
|
||||
{
|
||||
/* Give error, if not already given */
|
||||
if (!thd->is_error())
|
||||
|
@ -10432,7 +10446,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
|
|||
to->file->extra(HA_EXTRA_END_ALTER_COPY);
|
||||
to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
|
||||
|
||||
if (mysql_trans_commit_alter_copy_data(thd))
|
||||
if (unlikely(mysql_trans_commit_alter_copy_data(thd)))
|
||||
error= 1;
|
||||
|
||||
err:
|
||||
|
@ -10447,10 +10461,10 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
|
|||
*copied= found_count;
|
||||
*deleted=delete_count;
|
||||
to->file->ha_release_auto_increment();
|
||||
if (to->file->ha_external_lock(thd,F_UNLCK))
|
||||
error=1;
|
||||
if (error < 0 && !from->s->tmp_table &&
|
||||
to->file->extra(HA_EXTRA_PREPARE_FOR_RENAME))
|
||||
if (unlikely(to->file->ha_external_lock(thd,F_UNLCK)))
|
||||
error= 1;
|
||||
if (likely(error < 0) && !from->s->tmp_table &&
|
||||
unlikely(to->file->extra(HA_EXTRA_PREPARE_FOR_RENAME)))
|
||||
error= 1;
|
||||
thd_progress_end(thd);
|
||||
DBUG_RETURN(error > 0 ? -1 : 0);
|
||||
|
|
|
@ -46,21 +46,16 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)

if (hton->alter_tablespace)
{
if ((error= hton->alter_tablespace(hton, thd, ts_info)))
if (unlikely((error= hton->alter_tablespace(hton, thd, ts_info))))
{
if (error == 1)
{
DBUG_RETURN(1);
}

if (error == HA_ADMIN_NOT_IMPLEMENTED)
{
my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "");
}
else
{
my_error(error, MYF(0));
}

DBUG_RETURN(error);
}
}
@ -1477,7 +1477,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db,
|
|||
&lex.trg_chistics.anchor_trigger_name,
|
||||
trigger);
|
||||
|
||||
if (parse_error)
|
||||
if (unlikely(parse_error))
|
||||
{
|
||||
LEX_CSTRING *name;
|
||||
|
||||
|
@ -1491,10 +1491,10 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db,
|
|||
DBUG_ASSERT(lex.sphead == 0);
|
||||
lex_end(&lex);
|
||||
|
||||
if ((name= error_handler.get_trigger_name()))
|
||||
if (likely((name= error_handler.get_trigger_name())))
|
||||
{
|
||||
if (!(make_lex_string(&trigger->name, name->str,
|
||||
name->length, &table->mem_root)))
|
||||
if (unlikely(!(make_lex_string(&trigger->name, name->str,
|
||||
name->length, &table->mem_root))))
|
||||
goto err_with_lex_cleanup;
|
||||
}
|
||||
trigger->definer= ((!trg_definer || !trg_definer->length) ?
|
||||
|
@ -1615,7 +1615,7 @@ err_with_lex_cleanup:
|
|||
}
|
||||
|
||||
error:
|
||||
if (!thd->is_error())
|
||||
if (unlikely(!thd->is_error()))
|
||||
{
|
||||
/*
|
||||
We don't care about this error message much because .TRG files will
|
||||
|
@ -1891,7 +1891,7 @@ change_table_name_in_triggers(THD *thd,
|
|||
|
||||
thd->variables.sql_mode= save_sql_mode;
|
||||
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
return TRUE; /* OOM */
|
||||
|
||||
if (save_trigger_file(thd, new_db_name, new_table_name))
|
||||
|
@ -2103,9 +2103,9 @@ bool Table_triggers_list::change_table_name(THD *thd, const LEX_CSTRING *db,
|
|||
goto end;
|
||||
}
|
||||
}
|
||||
if (table.triggers->change_table_name_in_triggers(thd, db, new_db,
|
||||
old_alias,
|
||||
new_table))
|
||||
if (unlikely(table.triggers->change_table_name_in_triggers(thd, db, new_db,
|
||||
old_alias,
|
||||
new_table)))
|
||||
{
|
||||
result= 1;
|
||||
goto end;
|
||||
|
@ -2249,7 +2249,7 @@ add_tables_and_routines_for_triggers(THD *thd,
|
|||
{
|
||||
sp_head *trigger= triggers->body;
|
||||
|
||||
if (!triggers->body) // Parse error
|
||||
if (unlikely(!triggers->body)) // Parse error
|
||||
continue;
|
||||
|
||||
MDL_key key(MDL_key::TRIGGER, trigger->m_db.str, trigger->m_name.str);
|
||||
|
|
|
@ -139,7 +139,7 @@ fk_truncate_illegal_if_parent(THD *thd, TABLE *table)
|
|||
table->file->get_parent_foreign_key_list(thd, &fk_list);
|
||||
|
||||
/* Out of memory when building list. */
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
return TRUE;
|
||||
|
||||
it.init(fk_list);
|
||||
|
@ -240,7 +240,7 @@ Sql_cmd_truncate_table::handler_truncate(THD *thd, TABLE_LIST *table_ref,
|
|||
DBUG_RETURN(TRUNCATE_FAILED_SKIP_BINLOG);
|
||||
|
||||
error= table_ref->table->file->ha_truncate();
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
table_ref->table->file->print_error(error, MYF(0));
|
||||
/*
|
||||
|
@ -447,7 +447,7 @@ bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref)
|
|||
query must be written to the binary log. The only exception is a
|
||||
unimplemented truncate method.
|
||||
*/
|
||||
if (error == TRUNCATE_OK || error == TRUNCATE_FAILED_BUT_BINLOG)
|
||||
if (unlikely(error == TRUNCATE_OK || error == TRUNCATE_FAILED_BUT_BINLOG))
|
||||
binlog_stmt= true;
|
||||
else
|
||||
binlog_stmt= false;
|
||||
|
|
|
@ -255,7 +255,7 @@ bool table_value_constr::prepare(THD *thd, SELECT_LEX *sl,
|
|||
sl->item_list.push_back(new_holder);
|
||||
}
|
||||
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
DBUG_RETURN(true); // out of memory
|
||||
|
||||
result= tmp_result;
|
||||
|
|
|
@ -252,7 +252,7 @@ void udf_init()
|
|||
}
|
||||
}
|
||||
}
|
||||
if (error > 0)
|
||||
if (unlikely(error > 0))
|
||||
sql_print_error("Got unknown error: %d", my_errno);
|
||||
end_read_record(&read_record_info);
|
||||
table->m_needs_reopen= TRUE; // Force close to free memory
|
||||
|
@ -453,7 +453,7 @@ static int mysql_drop_function_internal(THD *thd, udf_func *udf, TABLE *table)
|
|||
HA_READ_KEY_EXACT))
|
||||
{
|
||||
int error;
|
||||
if ((error= table->file->ha_delete_row(table->record[0])))
|
||||
if (unlikely((error= table->file->ha_delete_row(table->record[0]))))
|
||||
table->file->print_error(error, MYF(0));
|
||||
}
|
||||
DBUG_RETURN(0);
|
||||
|
@ -513,7 +513,7 @@ int mysql_create_function(THD *thd,udf_func *udf)
|
|||
{
|
||||
if (thd->lex->create_info.or_replace())
|
||||
{
|
||||
if ((error= mysql_drop_function_internal(thd, u_d, table)))
|
||||
if (unlikely((error= mysql_drop_function_internal(thd, u_d, table))))
|
||||
goto err;
|
||||
}
|
||||
else if (thd->lex->create_info.if_not_exists())
|
||||
|
@ -569,7 +569,7 @@ int mysql_create_function(THD *thd,udf_func *udf)
|
|||
/* create entry in mysql.func table */
|
||||
|
||||
/* Allow creation of functions even if we can't open func table */
|
||||
if (!table)
|
||||
if (unlikely(!table))
|
||||
goto err;
|
||||
table->use_all_columns();
|
||||
restore_record(table, s->default_values); // Default values for fields
|
||||
|
@ -578,9 +578,9 @@ int mysql_create_function(THD *thd,udf_func *udf)
|
|||
table->field[2]->store(u_d->dl,(uint) strlen(u_d->dl), system_charset_info);
|
||||
if (table->s->fields >= 4) // If not old func format
|
||||
table->field[3]->store((longlong) u_d->type, TRUE);
|
||||
error = table->file->ha_write_row(table->record[0]);
|
||||
error= table->file->ha_write_row(table->record[0]);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
my_error(ER_ERROR_ON_WRITE, MYF(0), "mysql.func", error);
|
||||
del_udf(u_d);
|
||||
|
@ -591,7 +591,7 @@ done:
|
|||
mysql_rwlock_unlock(&THR_LOCK_udf);
|
||||
|
||||
/* Binlog the create function. */
|
||||
if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
|
||||
if (unlikely(write_bin_log(thd, TRUE, thd->query(), thd->query_length())))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
DBUG_RETURN(0);
|
||||
|
|
|
@ -126,7 +126,7 @@ int select_unit::send_data(List<Item> &values)
|
|||
}
|
||||
else
|
||||
fill_record(thd, table, table->field, values, TRUE, FALSE);
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
rc= 1;
|
||||
goto end;
|
||||
|
@ -146,7 +146,8 @@ int select_unit::send_data(List<Item> &values)
|
|||
{
|
||||
case UNION_TYPE:
|
||||
{
|
||||
if ((write_err= table->file->ha_write_tmp_row(table->record[0])))
|
||||
if (unlikely((write_err=
|
||||
table->file->ha_write_tmp_row(table->record[0]))))
|
||||
{
|
||||
if (write_err == HA_ERR_FOUND_DUPP_KEY)
|
||||
{
|
||||
|
@ -235,7 +236,7 @@ int select_unit::send_data(List<Item> &values)
|
|||
rc= 0;
|
||||
|
||||
end:
|
||||
if (not_reported_error)
|
||||
if (unlikely(not_reported_error))
|
||||
{
|
||||
DBUG_ASSERT(rc);
|
||||
table->file->print_error(not_reported_error, MYF(0));
|
||||
|
@ -267,13 +268,12 @@ bool select_unit::send_eof()
|
|||
handler *file= table->file;
|
||||
int error;
|
||||
|
||||
if (file->ha_rnd_init_with_error(1))
|
||||
if (unlikely(file->ha_rnd_init_with_error(1)))
|
||||
return 1;
|
||||
|
||||
do
|
||||
{
|
||||
error= file->ha_rnd_next(table->record[0]);
|
||||
if (error)
|
||||
if (unlikely(error= file->ha_rnd_next(table->record[0])))
|
||||
{
|
||||
if (error == HA_ERR_END_OF_FILE)
|
||||
{
|
||||
|
@ -289,10 +289,10 @@ bool select_unit::send_eof()
|
|||
}
|
||||
if (table->field[0]->val_int() != curr_step)
|
||||
error= file->ha_delete_tmp_row(table->record[0]);
|
||||
} while (!error);
|
||||
} while (likely(!error));
|
||||
file->ha_rnd_end();
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
table->file->print_error(error, MYF(0));
|
||||
|
||||
return(MY_TEST(error));
|
||||
|
@ -325,7 +325,7 @@ int select_union_recursive::send_data(List<Item> &values)
|
|||
bool select_unit::flush()
|
||||
{
|
||||
int error;
|
||||
if ((error=table->file->extra(HA_EXTRA_NO_CACHE)))
|
||||
if (unlikely((error=table->file->extra(HA_EXTRA_NO_CACHE))))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
return 1;
|
||||
|
@ -552,7 +552,7 @@ int select_union_direct::send_data(List<Item> &items)
|
|||
|
||||
send_records++;
|
||||
fill_record(thd, table, table->field, items, true, false);
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
return true; /* purecov: inspected */
|
||||
|
||||
return result->send_data(unit->item_list);
|
||||
|
@ -675,7 +675,7 @@ bool st_select_lex_unit::prepare_join(THD *thd_arg, SELECT_LEX *sl,
|
|||
sl->with_wild= 0;
|
||||
last_procedure= join->procedure;
|
||||
|
||||
if (saved_error || (saved_error= thd_arg->is_fatal_error))
|
||||
if (unlikely(saved_error || (saved_error= thd_arg->is_fatal_error)))
|
||||
DBUG_RETURN(true);
|
||||
/*
|
||||
Remove all references from the select_lex_units to the subqueries that
|
||||
|
@ -806,7 +806,7 @@ bool st_select_lex_unit::join_union_item_types(THD *thd_arg,
|
|||
&holders[pos]/*Type_all_attributes*/,
|
||||
holders[pos].get_maybe_null()));
|
||||
}
|
||||
if (thd_arg->is_fatal_error)
|
||||
if (unlikely(thd_arg->is_fatal_error))
|
||||
DBUG_RETURN(true); // out of memory
|
||||
DBUG_RETURN(false);
|
||||
}
|
||||
|
@ -1134,7 +1134,7 @@ cont:
|
|||
hidden);
|
||||
if (intersect_mark)
|
||||
types.pop();
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
goto err;
|
||||
}
|
||||
if (fake_select_lex && !fake_select_lex->first_cond_optimization)
|
||||
|
@ -1171,7 +1171,7 @@ cont:
|
|||
if (arena)
|
||||
thd->restore_active_arena(arena, &backup_arena);
|
||||
|
||||
if (saved_error)
|
||||
if (unlikely(saved_error))
|
||||
goto err;
|
||||
|
||||
if (fake_select_lex != NULL &&
|
||||
|
@ -1320,7 +1320,7 @@ bool st_select_lex_unit::optimize()
|
|||
saved_error= sl->join->optimize();
|
||||
}
|
||||
|
||||
if (saved_error)
|
||||
if (unlikely(saved_error))
|
||||
{
|
||||
thd->lex->current_select= lex_select_save;
|
||||
DBUG_RETURN(saved_error);
|
||||
|
@ -1358,7 +1358,7 @@ bool st_select_lex_unit::exec()
|
|||
if (!saved_error && !was_executed)
|
||||
save_union_explain(thd->lex->explain);
|
||||
|
||||
if (saved_error)
|
||||
if (unlikely(saved_error))
|
||||
DBUG_RETURN(saved_error);
|
||||
|
||||
if (union_result)
|
||||
|
@ -1423,7 +1423,7 @@ bool st_select_lex_unit::exec()
|
|||
saved_error= sl->join->optimize();
|
||||
}
|
||||
}
|
||||
if (!saved_error)
|
||||
if (likely(!saved_error))
|
||||
{
|
||||
records_at_start= table->file->stats.records;
|
||||
if (sl->tvc)
|
||||
|
@ -1434,7 +1434,7 @@ bool st_select_lex_unit::exec()
|
|||
{
|
||||
// This is UNION DISTINCT, so there should be a fake_select_lex
|
||||
DBUG_ASSERT(fake_select_lex != NULL);
|
||||
if (table->file->ha_disable_indexes(HA_KEY_SWITCH_ALL))
|
||||
if (unlikely(table->file->ha_disable_indexes(HA_KEY_SWITCH_ALL)))
|
||||
DBUG_RETURN(TRUE);
|
||||
table->no_keyread=1;
|
||||
}
|
||||
|
@ -1443,7 +1443,7 @@ bool st_select_lex_unit::exec()
|
|||
offset_limit_cnt= (ha_rows)(sl->offset_limit ?
|
||||
sl->offset_limit->val_uint() :
|
||||
0);
|
||||
if (!saved_error)
|
||||
if (likely(!saved_error))
|
||||
{
|
||||
examined_rows+= thd->get_examined_row_count();
|
||||
thd->set_examined_row_count(0);
|
||||
|
@ -1454,7 +1454,7 @@ bool st_select_lex_unit::exec()
|
|||
}
|
||||
}
|
||||
}
|
||||
if (saved_error)
|
||||
if (unlikely(saved_error))
|
||||
{
|
||||
thd->lex->current_select= lex_select_save;
|
||||
DBUG_RETURN(saved_error);
|
||||
|
@ -1463,7 +1463,7 @@ bool st_select_lex_unit::exec()
|
|||
{
|
||||
/* Needed for the following test and for records_at_start in next loop */
|
||||
int error= table->file->info(HA_STATUS_VARIABLE);
|
||||
if(error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
table->file->print_error(error, MYF(0));
|
||||
DBUG_RETURN(1);
|
||||
|
@ -1509,7 +1509,8 @@ bool st_select_lex_unit::exec()
|
|||
*/
|
||||
thd->lex->limit_rows_examined_cnt= ULONGLONG_MAX;
|
||||
|
||||
if (fake_select_lex != NULL && !thd->is_fatal_error) // Check if EOM
|
||||
// Check if EOM
|
||||
if (fake_select_lex != NULL && likely(!thd->is_fatal_error))
|
||||
{
|
||||
/* Send result to 'result' */
|
||||
saved_error= true;
|
||||
|
@ -1528,8 +1529,9 @@ bool st_select_lex_unit::exec()
|
|||
don't let it allocate the join. Perhaps this is because we need
|
||||
some special parameter values passed to join constructor?
|
||||
*/
|
||||
if (!(fake_select_lex->join= new JOIN(thd, item_list,
|
||||
fake_select_lex->options, result)))
|
||||
if (unlikely(!(fake_select_lex->join=
|
||||
new JOIN(thd, item_list, fake_select_lex->options,
|
||||
result))))
|
||||
{
|
||||
fake_select_lex->table_list.empty();
|
||||
goto err;
|
||||
|
@ -1595,7 +1597,7 @@ bool st_select_lex_unit::exec()
|
|||
}
|
||||
|
||||
fake_select_lex->table_list.empty();
|
||||
if (!saved_error)
|
||||
if (likely(!saved_error))
|
||||
{
|
||||
thd->limit_found_rows = (ulonglong)table->file->stats.records + add_rows;
|
||||
thd->inc_examined_row_count(examined_rows);
|
||||
|
@ -1671,7 +1673,7 @@ bool st_select_lex_unit::exec_recursive()
|
|||
if (with_element->with_anchor)
|
||||
end= with_element->first_recursive;
|
||||
}
|
||||
else if ((saved_error= incr_table->file->ha_delete_all_rows()))
|
||||
else if (unlikely((saved_error= incr_table->file->ha_delete_all_rows())))
|
||||
goto err;
|
||||
|
||||
for (st_select_lex *sl= start ; sl != end; sl= sl->next_select())
|
||||
|
@ -1698,17 +1700,17 @@ bool st_select_lex_unit::exec_recursive()
|
|||
sl->join->exec();
|
||||
saved_error= sl->join->error;
|
||||
}
|
||||
if (!saved_error)
|
||||
if (likely(!saved_error))
|
||||
{
|
||||
examined_rows+= thd->get_examined_row_count();
|
||||
thd->set_examined_row_count(0);
|
||||
if (union_result->flush())
|
||||
if (unlikely(union_result->flush()))
|
||||
{
|
||||
thd->lex->current_select= lex_select_save;
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
}
|
||||
if (saved_error)
|
||||
if (unlikely(saved_error))
|
||||
{
|
||||
thd->lex->current_select= lex_select_save;
|
||||
goto err;
|
||||
|
|
|
@ -231,7 +231,7 @@ static void prepare_record_for_error_message(int error, TABLE *table)
|
|||
Get the number of the offended index.
|
||||
We will see MAX_KEY if the engine cannot determine the affected index.
|
||||
*/
|
||||
if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY)
|
||||
if (unlikely((keynr= table->file->get_dup_key(error)) >= MAX_KEY))
|
||||
DBUG_VOID_RETURN;
|
||||
|
||||
/* Create unique_map with all fields used by that index. */
|
||||
|
@ -471,8 +471,8 @@ int mysql_update(THD *thd,
|
|||
set_statistics_for_table(thd, table);
|
||||
|
||||
select= make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
|
||||
if (error || !limit || thd->is_error() ||
|
||||
(select && select->check_quick(thd, safe_update, limit)))
|
||||
if (unlikely(error || !limit || thd->is_error() ||
|
||||
(select && select->check_quick(thd, safe_update, limit))))
|
||||
{
|
||||
query_plan.set_impossible_where();
|
||||
if (thd->lex->describe || thd->lex->analyze_stmt)
|
||||
|
@ -506,7 +506,7 @@ int mysql_update(THD *thd,
|
|||
goto err;
|
||||
}
|
||||
}
|
||||
if (init_ftfuncs(thd, select_lex, 1))
|
||||
if (unlikely(init_ftfuncs(thd, select_lex, 1)))
|
||||
goto err;
|
||||
|
||||
table->mark_columns_needed_for_update();
|
||||
|
@ -724,7 +724,7 @@ int mysql_update(THD *thd,
|
|||
error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
|
||||
reverse);
|
||||
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
{
|
||||
close_cached_file(&tempfile);
|
||||
goto err;
|
||||
|
@ -733,7 +733,7 @@ int mysql_update(THD *thd,
|
|||
THD_STAGE_INFO(thd, stage_searching_rows_for_update);
|
||||
ha_rows tmp_limit= limit;
|
||||
|
||||
while (!(error=info.read_record()) && !thd->killed)
|
||||
while (likely(!(error=info.read_record())) && likely(!thd->killed))
|
||||
{
|
||||
explain->buf_tracker.on_record_read();
|
||||
thd->inc_examined_row_count(1);
|
||||
|
@ -744,8 +744,8 @@ int mysql_update(THD *thd,
|
|||
|
||||
explain->buf_tracker.on_record_after_where();
|
||||
table->file->position(table->record[0]);
|
||||
if (my_b_write(&tempfile,table->file->ref,
|
||||
table->file->ref_length))
|
||||
if (unlikely(my_b_write(&tempfile,table->file->ref,
|
||||
table->file->ref_length)))
|
||||
{
|
||||
error=1; /* purecov: inspected */
|
||||
break; /* purecov: inspected */
|
||||
|
@ -763,7 +763,7 @@ int mysql_update(THD *thd,
|
|||
error since in this case the transaction might have been
|
||||
rolled back already.
|
||||
*/
|
||||
if (error < 0)
|
||||
if (unlikely(error < 0))
|
||||
{
|
||||
/* Fatal error from select->skip_record() */
|
||||
error= 1;
|
||||
|
@ -773,7 +773,7 @@ int mysql_update(THD *thd,
|
|||
table->file->unlock_row();
|
||||
}
|
||||
}
|
||||
if (thd->killed && !error)
|
||||
if (unlikely(thd->killed) && !error)
|
||||
error= 1; // Aborted
|
||||
limit= tmp_limit;
|
||||
table->file->try_semi_consistent_read(0);
|
||||
|
@ -790,14 +790,15 @@ int mysql_update(THD *thd,
|
|||
}
|
||||
else
|
||||
{
|
||||
select= new SQL_SELECT;
|
||||
if (!(select= new SQL_SELECT))
|
||||
goto err;
|
||||
select->head=table;
|
||||
}
|
||||
|
||||
if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
|
||||
error=1; /* purecov: inspected */
|
||||
select->file=tempfile; // Read row ptrs from this file
|
||||
if (error >= 0)
|
||||
if (unlikely(reinit_io_cache(&tempfile,READ_CACHE,0L,0,0)))
|
||||
error= 1; /* purecov: inspected */
|
||||
select->file= tempfile; // Read row ptrs from this file
|
||||
if (unlikely(error >= 0))
|
||||
goto err;
|
||||
|
||||
table->file->ha_end_keyread();
|
||||
|
@ -831,7 +832,7 @@ update_begin:
|
|||
/* Direct updating is supported */
|
||||
DBUG_PRINT("info", ("Using direct update"));
|
||||
table->reset_default_fields();
|
||||
if (!(error= table->file->ha_direct_update_rows(&updated)))
|
||||
if (unlikely(!(error= table->file->ha_direct_update_rows(&updated))))
|
||||
error= -1;
|
||||
found= updated;
|
||||
goto update_end;
|
||||
|
@ -942,11 +943,11 @@ update_begin:
|
|||
error= table->file->ha_update_row(table->record[1],
|
||||
table->record[0]);
|
||||
}
|
||||
if (error == HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
|
||||
{
|
||||
error= 0;
|
||||
}
|
||||
else if (!error)
|
||||
else if (likely(!error))
|
||||
{
|
||||
if (has_vers_fields && table->versioned())
|
||||
{
|
||||
|
@ -956,14 +957,15 @@ update_begin:
|
|||
error= vers_insert_history_row(table);
|
||||
restore_record(table, record[2]);
|
||||
}
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
updated_sys_ver++;
|
||||
}
|
||||
if (!error)
|
||||
if (likely(!error))
|
||||
updated++;
|
||||
}
|
||||
|
||||
if (error && (!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
|
||||
if (unlikely(error) &&
|
||||
(!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
|
||||
{
|
||||
/*
|
||||
If (ignore && error is ignorable) we don't have to
|
||||
|
@ -982,8 +984,8 @@ update_begin:
|
|||
}
|
||||
|
||||
if (table->triggers &&
|
||||
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
|
||||
TRG_ACTION_AFTER, TRUE))
|
||||
unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
|
||||
TRG_ACTION_AFTER, TRUE)))
|
||||
{
|
||||
error= 1;
|
||||
break;
|
||||
|
@ -1036,7 +1038,7 @@ update_begin:
|
|||
Don't try unlocking the row if skip_record reported an error since in
|
||||
this case the transaction might have been rolled back already.
|
||||
*/
|
||||
else if (!thd->is_error())
|
||||
else if (likely(!thd->is_error()))
|
||||
table->file->unlock_row();
|
||||
else
|
||||
{
|
||||
|
@ -1044,7 +1046,7 @@ update_begin:
|
|||
break;
|
||||
}
|
||||
thd->get_stmt_da()->inc_current_row_for_warning();
|
||||
if (thd->is_error())
|
||||
if (unlikely(thd->is_error()))
|
||||
{
|
||||
error= 1;
|
||||
break;
|
||||
|
@ -1069,7 +1071,7 @@ update_begin:
|
|||
};);
|
||||
error= (killed_status == NOT_KILLED)? error : 1;
|
||||
|
||||
if (error &&
|
||||
if (likely(error) &&
|
||||
will_batch &&
|
||||
(loc_error= table->file->exec_bulk_update(&dup_key_found)))
|
||||
/*
|
||||
|
@ -1127,12 +1129,12 @@ update_end:
|
|||
Sometimes we want to binlog even if we updated no rows, in case user used
|
||||
it to be sure master and slave are in same state.
|
||||
*/
|
||||
if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
|
||||
if (likely(error < 0) || thd->transaction.stmt.modified_non_trans_table)
|
||||
{
|
||||
if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
|
||||
{
|
||||
int errcode= 0;
|
||||
if (error < 0)
|
||||
if (likely(error < 0))
|
||||
thd->clear_error();
|
||||
else
|
||||
errcode= query_error_code(thd, killed_status == NOT_KILLED);
|
||||
|
@ -1161,7 +1163,7 @@ update_end:
|
|||
id= thd->arg_of_last_insert_id_function ?
|
||||
thd->first_successful_insert_id_in_prev_stmt : 0;
|
||||
|
||||
if (error < 0 && !thd->lex->analyze_stmt)
|
||||
if (likely(error < 0) && likely(!thd->lex->analyze_stmt))
|
||||
{
|
||||
char buff[MYSQL_ERRMSG_SIZE];
|
||||
if (!table->versioned(VERS_TIMESTAMP))
|
||||
|
@ -1187,7 +1189,7 @@ update_end:
|
|||
*found_return= found;
|
||||
*updated_return= updated;
|
||||
|
||||
if (thd->lex->analyze_stmt)
|
||||
if (unlikely(thd->lex->analyze_stmt))
|
||||
goto emit_explain_and_leave;
|
||||
|
||||
DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
|
||||
|
@ -1207,7 +1209,7 @@ produce_explain_and_leave:
|
|||
We come here for various "degenerate" query plans: impossible WHERE,
|
||||
no-partitions-used, impossible-range, etc.
|
||||
*/
|
||||
if (!query_plan.save_explain_update_data(query_plan.mem_root, thd))
|
||||
if (unlikely(!query_plan.save_explain_update_data(query_plan.mem_root, thd)))
|
||||
goto err;
|
||||
|
||||
emit_explain_and_leave:
|
||||
|
@ -1863,7 +1865,7 @@ int multi_update::prepare(List<Item> ¬_used_values,
|
|||
bitmap_union(table->read_set, &table->tmp_set);
|
||||
}
|
||||
}
|
||||
if (error)
|
||||
if (unlikely(error))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
/*
|
||||
|
@ -1907,14 +1909,14 @@ int multi_update::prepare(List<Item> ¬_used_values,
|
|||
table_count);
|
||||
values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
|
||||
table_count);
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
DBUG_RETURN(1);
|
||||
for (i=0 ; i < table_count ; i++)
|
||||
{
|
||||
fields_for_table[i]= new List_item;
|
||||
values_for_table[i]= new List_item;
|
||||
}
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
/* Split fields into fields_for_table[] and values_by_table[] */
|
||||
|
@ -1926,7 +1928,7 @@ int multi_update::prepare(List<Item> ¬_used_values,
|
|||
fields_for_table[offset]->push_back(item, thd->mem_root);
|
||||
values_for_table[offset]->push_back(value, thd->mem_root);
|
||||
}
|
||||
if (thd->is_fatal_error)
|
||||
if (unlikely(thd->is_fatal_error))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
/* Allocate copy fields */
|
||||
|
@ -2058,7 +2060,8 @@ multi_update::initialize_tables(JOIN *join)
|
|||
TABLE_LIST *table_ref;
|
||||
DBUG_ENTER("initialize_tables");
|
||||
|
||||
if ((thd->variables.option_bits & OPTION_SAFE_UPDATES) && error_if_full_join(join))
|
||||
if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
|
||||
error_if_full_join(join)))
|
||||
DBUG_RETURN(1);
|
||||
main_table=join->join_tab->table;
|
||||
table_to_update= 0;
|
||||
|
@ -2295,7 +2298,8 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
{
|
||||
int error;
|
||||
|
||||
if (table->default_field && table->update_default_fields(1, ignore))
|
||||
if (table->default_field &&
|
||||
unlikely(table->update_default_fields(1, ignore)))
|
||||
DBUG_RETURN(1);
|
||||
|
||||
if ((error= cur_table->view_check_option(thd, ignore)) !=
|
||||
|
@ -2304,10 +2308,10 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
found--;
|
||||
if (error == VIEW_CHECK_SKIP)
|
||||
continue;
|
||||
else if (error == VIEW_CHECK_ERROR)
|
||||
else if (unlikely(error == VIEW_CHECK_ERROR))
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
if (!updated++)
|
||||
if (unlikely(!updated++))
|
||||
{
|
||||
/*
|
||||
Inform the main table that we are going to update the table even
|
||||
|
@ -2316,8 +2320,8 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
*/
|
||||
main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
|
||||
}
|
||||
if ((error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely((error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0]))) &&
|
||||
error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
{
|
||||
updated--;
|
||||
|
@ -2340,7 +2344,7 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
}
|
||||
else
|
||||
{
|
||||
if (error == HA_ERR_RECORD_IS_THE_SAME)
|
||||
if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
|
||||
{
|
||||
error= 0;
|
||||
updated--;
|
||||
|
@ -2372,8 +2376,8 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
}
|
||||
}
|
||||
if (table->triggers &&
|
||||
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
|
||||
TRG_ACTION_AFTER, TRUE))
|
||||
unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
|
||||
TRG_ACTION_AFTER, TRUE)))
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
else
|
||||
|
@ -2409,18 +2413,23 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
|
||||
/* Write row, ignoring duplicated updates to a row */
|
||||
error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
|
||||
if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
|
||||
found++;
|
||||
if (unlikely(error))
|
||||
{
|
||||
if (error &&
|
||||
create_internal_tmp_table_from_heap(thd, tmp_table,
|
||||
tmp_table_param[offset].start_recinfo,
|
||||
&tmp_table_param[offset].recinfo,
|
||||
error, 1, NULL))
|
||||
found--;
|
||||
if (error != HA_ERR_FOUND_DUPP_KEY &&
|
||||
error != HA_ERR_FOUND_DUPP_UNIQUE)
|
||||
{
|
||||
do_update= 0;
|
||||
DBUG_RETURN(1); // Not a table_is_full error
|
||||
if (create_internal_tmp_table_from_heap(thd, tmp_table,
|
||||
tmp_table_param[offset].start_recinfo,
|
||||
&tmp_table_param[offset].recinfo,
|
||||
error, 1, NULL))
|
||||
{
|
||||
do_update= 0;
|
||||
DBUG_RETURN(1); // Not a table_is_full error
|
||||
}
|
||||
found++;
|
||||
}
|
||||
found++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2431,8 +2440,8 @@ int multi_update::send_data(List<Item> ¬_used_values)
|
|||
void multi_update::abort_result_set()
|
||||
{
|
||||
/* the error was handled or nothing deleted and no side effects return */
|
||||
if (error_handled ||
|
||||
(!thd->transaction.stmt.modified_non_trans_table && !updated))
|
||||
if (unlikely(error_handled ||
|
||||
(!thd->transaction.stmt.modified_non_trans_table && !updated)))
|
||||
return;
|
||||
|
||||
/* Something already updated so we have to invalidate cache */
|
||||
|
@ -2522,7 +2531,7 @@ int multi_update::do_updates()
|
|||
org_updated= updated;
|
||||
tmp_table= tmp_tables[cur_table->shared];
|
||||
tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
|
||||
if ((local_error= table->file->ha_rnd_init(0)))
|
||||
if (unlikely((local_error= table->file->ha_rnd_init(0))))
|
||||
{
|
||||
err_table= table;
|
||||
goto err;
|
||||
|
@ -2545,7 +2554,7 @@ int multi_update::do_updates()
|
|||
check_opt_it.rewind();
|
||||
while(TABLE *tbl= check_opt_it++)
|
||||
{
|
||||
if ((local_error= tbl->file->ha_rnd_init(1)))
|
||||
if (unlikely((local_error= tbl->file->ha_rnd_init(1))))
|
||||
{
|
||||
err_table= tbl;
|
||||
goto err;
|
||||
|
@ -2569,7 +2578,7 @@ int multi_update::do_updates()
|
|||
}
|
||||
copy_field_end=copy_field_ptr;
|
||||
|
||||
if ((local_error= tmp_table->file->ha_rnd_init(1)))
|
||||
if (unlikely((local_error= tmp_table->file->ha_rnd_init(1))))
|
||||
{
|
||||
err_table= tmp_table;
|
||||
goto err;
|
||||
|
@ -2584,7 +2593,8 @@ int multi_update::do_updates()
|
|||
thd->fatal_error();
|
||||
goto err2;
|
||||
}
|
||||
if ((local_error= tmp_table->file->ha_rnd_next(tmp_table->record[0])))
|
||||
if (unlikely((local_error=
|
||||
tmp_table->file->ha_rnd_next(tmp_table->record[0]))))
|
||||
{
|
||||
if (local_error == HA_ERR_END_OF_FILE)
|
||||
break;
|
||||
|
@ -2600,9 +2610,10 @@ int multi_update::do_updates()
|
|||
uint field_num= 0;
|
||||
do
|
||||
{
|
||||
if ((local_error=
|
||||
tbl->file->ha_rnd_pos(tbl->record[0],
|
||||
(uchar *) tmp_table->field[field_num]->ptr)))
|
||||
if (unlikely((local_error=
|
||||
tbl->file->ha_rnd_pos(tbl->record[0],
|
||||
(uchar *) tmp_table->
|
||||
field[field_num]->ptr))))
|
||||
{
|
||||
err_table= tbl;
|
||||
goto err;
|
||||
|
@ -2611,8 +2622,8 @@ int multi_update::do_updates()
|
|||
} while ((tbl= check_opt_it++));
|
||||
|
||||
if (table->vfield &&
|
||||
table->update_virtual_fields(table->file,
|
||||
VCOL_UPDATE_INDEXED_FOR_UPDATE))
|
||||
unlikely(table->update_virtual_fields(table->file,
|
||||
VCOL_UPDATE_INDEXED_FOR_UPDATE)))
|
||||
goto err2;
|
||||
|
||||
table->status|= STATUS_UPDATED;
|
||||
|
@ -2646,7 +2657,7 @@ int multi_update::do_updates()
|
|||
{
|
||||
if (error == VIEW_CHECK_SKIP)
|
||||
continue;
|
||||
else if (error == VIEW_CHECK_ERROR)
|
||||
else if (unlikely(error == VIEW_CHECK_ERROR))
|
||||
{
|
||||
thd->fatal_error();
|
||||
goto err2;
|
||||
|
@ -2655,8 +2666,9 @@ int multi_update::do_updates()
|
|||
if (has_vers_fields && table->versioned())
|
||||
table->vers_update_fields();
|
||||
|
||||
if ((local_error=table->file->ha_update_row(table->record[1],
|
||||
table->record[0])) &&
|
||||
if (unlikely((local_error=
|
||||
table->file->ha_update_row(table->record[1],
|
||||
table->record[0]))) &&
|
||||
local_error != HA_ERR_RECORD_IS_THE_SAME)
|
||||
{
|
||||
if (!ignore ||
|
||||
|
@ -2691,8 +2703,8 @@ int multi_update::do_updates()
|
|||
}
|
||||
|
||||
if (table->triggers &&
|
||||
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
|
||||
TRG_ACTION_AFTER, TRUE))
|
||||
unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
|
||||
TRG_ACTION_AFTER, TRUE)))
|
||||
goto err2;
|
||||
}
|
||||
|
||||
|
@ -2762,7 +2774,7 @@ bool multi_update::send_eof()
|
|||
error takes into account killed status gained in do_updates()
|
||||
*/
|
||||
int local_error= thd->is_error();
|
||||
if (!local_error)
|
||||
if (likely(!local_error))
|
||||
local_error = (table_count) ? do_updates() : 0;
|
||||
/*
|
||||
if local_error is not set ON until after do_updates() then
|
||||
|
@ -2792,12 +2804,13 @@ bool multi_update::send_eof()
|
|||
thd->transaction.all.m_unsafe_rollback_flags|=
|
||||
(thd->transaction.stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);
|
||||
|
||||
if (local_error == 0 || thd->transaction.stmt.modified_non_trans_table)
|
||||
if (likely(local_error == 0 ||
|
||||
thd->transaction.stmt.modified_non_trans_table))
|
||||
{
|
||||
if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
|
||||
{
|
||||
int errcode= 0;
|
||||
if (local_error == 0)
|
||||
if (likely(local_error == 0))
|
||||
thd->clear_error();
|
||||
else
|
||||
errcode= query_error_code(thd, killed_status == NOT_KILLED);
|
||||
|
@ -2824,10 +2837,10 @@ bool multi_update::send_eof()
|
|||
DBUG_ASSERT(trans_safe || !updated ||
|
||||
thd->transaction.stmt.modified_non_trans_table);
|
||||
|
||||
if (local_error != 0)
|
||||
if (likely(local_error != 0))
|
||||
error_handled= TRUE; // to force early leave from ::abort_result_set()
|
||||
|
||||
if (local_error > 0) // if the above log write did not fail ...
|
||||
if (unlikely(local_error > 0)) // if the above log write did not fail ...
|
||||
{
|
||||
/* Safety: If we haven't got an error before (can happen in do_updates) */
|
||||
my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
|
||||
|
|
|
@ -1791,13 +1791,14 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
TABLES we have to simply prohibit dropping of views.
*/

if (thd->locked_tables_mode)
if (unlikely(thd->locked_tables_mode))
{
my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
DBUG_RETURN(TRUE);
}

if (lock_table_names(thd, views, 0, thd->variables.lock_wait_timeout, 0))
if (unlikely(lock_table_names(thd, views, 0,
thd->variables.lock_wait_timeout, 0)))
DBUG_RETURN(TRUE);

for (view= views; view; view= view->next_local)

@ -1835,7 +1836,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
}
continue;
}
if (mysql_file_delete(key_file_frm, path, MYF(MY_WME)))
if (unlikely(mysql_file_delete(key_file_frm, path, MYF(MY_WME))))
error= TRUE;

some_views_deleted= TRUE;

@ -1850,12 +1851,12 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
sp_cache_invalidate();
}

if (wrong_object_name)
if (unlikely(wrong_object_name))
{
my_error(ER_WRONG_OBJECT, MYF(0), wrong_object_db, wrong_object_name,
"VIEW");
}
if (non_existant_views.length())
if (unlikely(non_existant_views.length()))
{
my_error(ER_UNKNOWN_VIEW, MYF(0), non_existant_views.c_ptr_safe());
}

@ -1866,11 +1867,12 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
/* if something goes wrong, bin-log with possible error code,
otherwise bin-log with error code cleared.
*/
if (write_bin_log(thd, !something_wrong, thd->query(), thd->query_length()))
if (unlikely(write_bin_log(thd, !something_wrong, thd->query(),
thd->query_length())))
something_wrong= 1;
}

if (something_wrong)
if (unlikely(something_wrong))
{
DBUG_RETURN(TRUE);
}