Mirror of https://github.com/MariaDB/server.git
Commit 68f02c65ef: merge with 5.1
22 changed files with 44 additions and 38 deletions
@@ -452,7 +452,7 @@ my_bool maria_test_if_sort_rep(MARIA_HA *info, ha_rows rows, ulonglong key_map,

 int maria_init_bulk_insert(MARIA_HA *info, ulong cache_size, ha_rows rows);
 void maria_flush_bulk_insert(MARIA_HA *info, uint inx);
-void maria_end_bulk_insert(MARIA_HA *info, my_bool table_will_be_deleted);
+void maria_end_bulk_insert(MARIA_HA *info);
 int maria_assign_to_pagecache(MARIA_HA *info, ulonglong key_map,
                               PAGECACHE *key_cache);
 void maria_change_pagecache(PAGECACHE *old_key_cache,
@@ -4232,7 +4232,7 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
 /**
   End of an insert.
  */
-int ha_ndbcluster::end_bulk_insert(bool abort)
+int ha_ndbcluster::end_bulk_insert()
 {
   int error= 0;
   DBUG_ENTER("end_bulk_insert");
@@ -322,7 +322,7 @@ class ha_ndbcluster: public handler
   double scan_time();
   ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
   void start_bulk_insert(ha_rows rows);
-  int end_bulk_insert(bool abort);
+  int end_bulk_insert();

   static Thd_ndb* seize_thd_ndb();
   static void release_thd_ndb(Thd_ndb* thd_ndb);
@@ -3416,18 +3416,17 @@ ha_rows ha_partition::guess_bulk_insert_rows()

   SYNOPSIS
     end_bulk_insert()
-    abort                  1 if table will be deleted (error condition)

   RETURN VALUE
     >0                      Error code
     0                       Success

   Note: end_bulk_insert can be called without start_bulk_insert
-        being called, see bug¤44108.
+        being called, see bug#44108.

 */

-int ha_partition::end_bulk_insert(bool abort)
+int ha_partition::end_bulk_insert()
 {
   int error= 0;
   uint i;
@@ -3440,7 +3439,7 @@ int ha_partition::end_bulk_insert(bool abort)
   {
     int tmp;
     if (bitmap_is_set(&m_bulk_insert_started, i) &&
-        (tmp= m_file[i]->ha_end_bulk_insert(abort)))
+        (tmp= m_file[i]->ha_end_bulk_insert()))
       error= tmp;
   }
   bitmap_clear_all(&m_bulk_insert_started);
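The hunk above is the partition engine's side of the API change: ha_partition::end_bulk_insert() now forwards a parameterless ha_end_bulk_insert() to every sub-handler whose bulk insert was actually started, and keeps the last non-zero error it sees. The following stand-alone sketch models that control flow with toy types (ToyHandler and std::vector<bool> stand in for the real handler class and MY_BITMAP; none of it is the actual server code):

// Toy model of the forwarding pattern in ha_partition::end_bulk_insert()
// after this change: call the parameterless ha_end_bulk_insert() on every
// child handler whose bulk insert was started, remember any error.
#include <vector>
#include <cstdio>

struct ToyHandler {                        // stands in for a per-partition handler
  bool started= false;
  int ha_end_bulk_insert() { started= false; return 0; }   // 0 == success
};

struct ToyPartitionHandler {
  std::vector<ToyHandler> m_file;          // one handler per partition
  std::vector<bool> m_bulk_insert_started; // stands in for the MY_BITMAP

  int end_bulk_insert() {
    int error= 0;
    for (size_t i= 0; i < m_file.size(); i++) {
      int tmp;
      if (m_bulk_insert_started[i] && (tmp= m_file[i].ha_end_bulk_insert()))
        error= tmp;                        // keep the last non-zero error
    }
    m_bulk_insert_started.assign(m_bulk_insert_started.size(), false);
    return error;
  }
};

int main() {
  ToyPartitionHandler p;
  p.m_file.resize(4);
  p.m_bulk_insert_started.assign(4, false);
  p.m_bulk_insert_started[1]= true;
  p.m_bulk_insert_started[3]= true;
  std::printf("end_bulk_insert -> %d\n", p.end_bulk_insert());
  return 0;
}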
@@ -364,7 +364,7 @@ public:
   virtual int delete_row(const uchar * buf);
   virtual int delete_all_rows(void);
   virtual void start_bulk_insert(ha_rows rows);
-  virtual int end_bulk_insert(bool);
+  virtual int end_bulk_insert();
 private:
   ha_rows guess_bulk_insert_rows();
   void start_part_bulk_insert(THD *thd, uint part_id);
@@ -1382,10 +1382,10 @@ public:
     estimation_rows_to_insert= rows;
     start_bulk_insert(rows);
   }
-  int ha_end_bulk_insert(bool abort)
+  int ha_end_bulk_insert()
   {
     estimation_rows_to_insert= 0;
-    return end_bulk_insert(abort);
+    return end_bulk_insert();
   }
   int ha_bulk_update_row(const uchar *old_data, uchar *new_data,
                          uint *dup_key_found);
@@ -2062,7 +2062,7 @@ private:
   virtual int repair(THD* thd, HA_CHECK_OPT* check_opt)
   { return HA_ADMIN_NOT_IMPLEMENTED; }
   virtual void start_bulk_insert(ha_rows rows) {}
-  virtual int end_bulk_insert(bool abort) { return 0; }
+  virtual int end_bulk_insert() { return 0; }
   virtual int index_read(uchar * buf, const uchar * key, uint key_len,
                          enum ha_rkey_function find_flag)
   { return HA_ERR_WRONG_COMMAND; }
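The two hunks above change the handler base class itself: the public ha_end_bulk_insert() wrapper loses its bool argument, and so does the virtual end_bulk_insert() it dispatches to (whose default implementation just returns 0). A minimal self-contained sketch of that wrapper/virtual split follows; toy_handler and toy_engine are invented names for illustration, not the real classes:

// Sketch of the handler-level API after the signature change: the public
// ha_*() entry point resets the bookkeeping and dispatches to the
// engine-specific virtual, which no longer takes an "abort" argument.
#include <cstdio>

typedef unsigned long long ha_rows;        // stand-in for the server typedef

class toy_handler {
public:
  ha_rows estimation_rows_to_insert= 0;

  void ha_start_bulk_insert(ha_rows rows) {
    estimation_rows_to_insert= rows;
    start_bulk_insert(rows);
  }
  int ha_end_bulk_insert() {               // no bool abort any more
    estimation_rows_to_insert= 0;
    return end_bulk_insert();
  }
  virtual ~toy_handler() {}

private:
  virtual void start_bulk_insert(ha_rows) {}     // default: do nothing
  virtual int end_bulk_insert() { return 0; }    // default: success
};

class toy_engine : public toy_handler {
private:
  int end_bulk_insert() override {
    std::printf("flushing bulk-insert caches\n");
    return 0;
  }
};

int main() {
  toy_engine h;
  h.ha_start_bulk_insert(1000);
  return h.ha_end_bulk_insert();
}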
@@ -8521,7 +8521,7 @@ Write_rows_log_event::do_after_row_operations(const Slave_reporting_capability *
       ultimately. Still todo: fix
     */
   }
-  if ((local_error= m_table->file->ha_end_bulk_insert(0)))
+  if ((local_error= m_table->file->ha_end_bulk_insert()))
   {
     m_table->file->print_error(local_error, MYF(0));
   }
@@ -948,7 +948,7 @@ int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
       fires bug#27077
       todo: explain or fix
     */
-  if ((local_error= table->file->ha_end_bulk_insert(0)))
+  if ((local_error= table->file->ha_end_bulk_insert()))
   {
     table->file->print_error(local_error, MYF(0));
   }
@@ -2644,7 +2644,7 @@ Write_rows_log_event_old::do_after_row_operations(const Slave_reporting_capabili
       fires bug#27077
       todo: explain or fix
     */
-  if ((local_error= m_table->file->ha_end_bulk_insert(0)))
+  if ((local_error= m_table->file->ha_end_bulk_insert()))
   {
     m_table->file->print_error(local_error, MYF(0));
   }
@@ -881,7 +881,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
     auto_inc values from the delayed_insert thread as they share TABLE.
   */
   table->file->ha_release_auto_increment();
-  if (using_bulk_insert && table->file->ha_end_bulk_insert(0) && !error)
+  if (using_bulk_insert && table->file->ha_end_bulk_insert() && !error)
   {
     table->file->print_error(my_errno,MYF(0));
     error=1;
@@ -3288,7 +3288,7 @@ bool select_insert::send_eof()
   DBUG_PRINT("enter", ("trans_table=%d, table_type='%s'",
                        trans_table, table->file->table_type()));

-  error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert(0) : 0;
+  error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert() : 0;
   table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
   table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);

@@ -3371,7 +3371,7 @@ void select_insert::abort() {
     before.
   */
   if (!thd->prelocked_mode)
-    table->file->ha_end_bulk_insert(0);
+    table->file->ha_end_bulk_insert();

   /*
     If at least one row has been inserted/modified and will stay in
@@ -432,7 +432,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
     error= read_sep_field(thd, info, table_list, fields_vars,
                           set_fields, set_values, read_info,
                           *enclosed, skip_lines, ignore);
-  if (!thd->prelocked_mode && table->file->ha_end_bulk_insert(0) && !error)
+  if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error)
   {
     table->file->print_error(my_errno, MYF(0));
     error= 1;
@@ -7959,7 +7959,7 @@ err:

   if (error > 0)
     to->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
-  if (errpos >= 3 && to->file->ha_end_bulk_insert(error > 1) && error <= 0)
+  if (errpos >= 3 && to->file->ha_end_bulk_insert() && error <= 0)
   {
     to->file->print_error(my_errno,MYF(0));
     error= 1;
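In the hunk above the caller no longer tells ha_end_bulk_insert() whether the copy failed; on the error path it issues extra(HA_EXTRA_PREPARE_FOR_DROP) beforehand and the engine is expected to remember that hint. A self-contained sketch of that handshake, with all toy_* names invented for illustration and only HA_EXTRA_PREPARE_FOR_DROP taken from the diff:

// Sketch: "table is about to be dropped" now reaches the engine through an
// extra() hint that the engine latches, instead of through an argument to
// end_bulk_insert().
#include <cstdio>

enum toy_extra_function { TOY_EXTRA_PREPARE_FOR_DROP };  // models HA_EXTRA_PREPARE_FOR_DROP

class toy_handler {
  bool table_will_be_deleted= false;
public:
  int extra(toy_extra_function op) {
    if (op == TOY_EXTRA_PREPARE_FOR_DROP)
      table_will_be_deleted= true;                       // remember the hint
    return 0;
  }
  int ha_end_bulk_insert() {
    if (table_will_be_deleted) {
      std::printf("discarding buffered rows, table is going away\n");
      return 0;                                          // nothing worth flushing
    }
    std::printf("flushing buffered rows\n");
    return 0;
  }
};

// Caller pattern mirroring the hunk above: hint first on the error path,
// then always end the bulk insert.
int finish_copy(toy_handler &to, int error)
{
  if (error > 0)
    to.extra(TOY_EXTRA_PREPARE_FOR_DROP);
  if (to.ha_end_bulk_insert() && error <= 0)
    error= 1;
  return error;
}

int main() {
  toy_handler ok, failed;
  std::printf("success path -> %d\n", finish_copy(ok, 0));
  std::printf("error path   -> %d\n", finish_copy(failed, 2));
  return 0;
}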
@@ -1555,7 +1555,7 @@ void ha_archive::start_bulk_insert(ha_rows rows)
   Other side of start_bulk_insert, is end_bulk_insert. Here we turn off the bulk insert
   flag, and set the share dirty so that the next select will call sync for us.
 */
-int ha_archive::end_bulk_insert(bool table_will_be_deleted)
+int ha_archive::end_bulk_insert()
 {
   DBUG_ENTER("ha_archive::end_bulk_insert");
   bulk_insert= FALSE;
@@ -134,7 +134,7 @@ public:
   int optimize(THD* thd, HA_CHECK_OPT* check_opt);
   int repair(THD* thd, HA_CHECK_OPT* check_opt);
   void start_bulk_insert(ha_rows rows);
-  int end_bulk_insert(bool table_will_be_deleted);
+  int end_bulk_insert();
   enum row_type get_row_type() const
   {
     return ROW_TYPE_COMPRESSED;
@@ -1983,12 +1983,12 @@ void ha_federated::start_bulk_insert(ha_rows rows)
     @retval != 0       Error occured at remote server. Also sets my_errno.
 */

-int ha_federated::end_bulk_insert(bool abort)
+int ha_federated::end_bulk_insert()
 {
   int error= 0;
   DBUG_ENTER("ha_federated::end_bulk_insert");

-  if (!abort && bulk_insert.str && bulk_insert.length)
+  if (!table_will_be_deleted && bulk_insert.str && bulk_insert.length)
   {
     if (real_query(bulk_insert.str, bulk_insert.length))
       error= stash_remote_error();
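The FEDERATED engine accumulates the whole bulk insert into one buffered statement and, as the hunk above shows, now discards it when its table_will_be_deleted member (set elsewhere in this commit via the extra() hint) says the table is about to go away, rather than consulting the removed abort argument. A toy model of that batching behaviour; send_to_remote(), prepare_for_drop() and the rest are invented for the sketch and are not the real FEDERATED API:

// Sketch of FEDERATED-style bulk insert: rows are collected into one
// multi-row INSERT and shipped to the remote server only when the bulk
// insert ends normally; if the table is about to be dropped, the buffer
// is simply thrown away.
#include <string>
#include <cstdio>

class toy_federated {
  std::string bulk_insert;                 // models the DYNAMIC_STRING buffer
  bool table_will_be_deleted= false;

  int send_to_remote(const std::string &q) {  // stands in for real_query()
    std::printf("remote <- %s\n", q.c_str());
    return 0;
  }
public:
  void prepare_for_drop() { table_will_be_deleted= true; }  // set via the extra() hint

  void write_row(const std::string &values) {
    bulk_insert += bulk_insert.empty()
        ? "INSERT INTO t VALUES " + values
        : ", " + values;
  }
  int end_bulk_insert() {
    int error= 0;
    if (!table_will_be_deleted && !bulk_insert.empty())
      error= send_to_remote(bulk_insert);  // normal path: ship the batch
    bulk_insert.clear();                   // drop path: just forget it
    return error;
  }
};

int main() {
  toy_federated h;
  h.write_row("(1)");
  h.write_row("(2)");
  return h.end_bulk_insert();              // ships "INSERT INTO t VALUES (1), (2)"
}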
@@ -2905,6 +2905,8 @@ int ha_federated::extra(ha_extra_function operation)
   case HA_EXTRA_INSERT_WITH_UPDATE:
     insert_dup_update= TRUE;
     break;
+  case HA_EXTRA_PREPARE_FOR_DROP:
+    table_will_be_deleted = TRUE;
   default:
     /* do nothing */
     DBUG_PRINT("info",("unhandled operation: %d", (uint) operation));
@@ -3305,6 +3307,7 @@ int ha_federated::external_lock(THD *thd, int lock_type)
     }
   }
 #endif /* XXX_SUPERCEDED_BY_WL2952 */
+  table_will_be_deleted = FALSE;
   DBUG_RETURN(error);
 }

@@ -88,7 +88,7 @@ class ha_federated: public handler
     Array of all stored results we get during a query execution.
   */
   DYNAMIC_ARRAY results;
-  bool position_called;
+  bool position_called, table_will_be_deleted;
   uint fetch_num; // stores the fetch num
   MYSQL_ROW_OFFSET current_position;  // Current position used by ::position()
   int remote_error_number;
@@ -210,7 +210,7 @@ public:
   int close(void); // required

   void start_bulk_insert(ha_rows rows);
-  int end_bulk_insert(bool abort);
+  int end_bulk_insert();
   int write_row(uchar *buf);
   int update_row(const uchar *old_data, uchar *new_data);
   int delete_row(const uchar *buf);
@@ -2143,12 +2143,12 @@ void ha_federatedx::start_bulk_insert(ha_rows rows)
     @retval != 0       Error occured at remote server. Also sets my_errno.
 */

-int ha_federatedx::end_bulk_insert(bool abort)
+int ha_federatedx::end_bulk_insert()
 {
   int error= 0;
   DBUG_ENTER("ha_federatedx::end_bulk_insert");

-  if (bulk_insert.str && bulk_insert.length && !abort)
+  if (bulk_insert.str && bulk_insert.length && !table_will_be_deleted)
   {
     if ((error= txn->acquire(share, FALSE, &io)))
       DBUG_RETURN(error);
@@ -3082,6 +3082,9 @@ int ha_federatedx::extra(ha_extra_function operation)
   case HA_EXTRA_INSERT_WITH_UPDATE:
     insert_dup_update= TRUE;
     break;
+  case HA_EXTRA_PREPARE_FOR_DROP:
+    table_will_be_deleted = TRUE;
+    break;
   default:
     /* do nothing */
     DBUG_PRINT("info",("unhandled operation: %d", (uint) operation));
@@ -3391,6 +3394,7 @@ int ha_federatedx::external_lock(MYSQL_THD thd, int lock_type)
     txn->release(&io);
   else
   {
+    table_will_be_deleted = FALSE;
     txn= get_txn(thd);
     if (!(error= txn->acquire(share, lock_type == F_RDLCK, &io)) &&
         (lock_type == F_WRLCK || !io->is_autocommit()))
@@ -259,7 +259,7 @@ class ha_federatedx: public handler
   int remote_error_number;
   char remote_error_buf[FEDERATEDX_QUERY_BUFFER_SIZE];
   bool ignore_duplicates, replace_duplicates;
-  bool insert_dup_update;
+  bool insert_dup_update, table_will_be_deleted;
   DYNAMIC_STRING bulk_insert;

 private:
@@ -379,7 +379,7 @@ public:
   int close(void); // required

   void start_bulk_insert(ha_rows rows);
-  int end_bulk_insert(bool abort);
+  int end_bulk_insert();
   int write_row(uchar *buf);
   int update_row(const uchar *old_data, uchar *new_data);
   int delete_row(const uchar *buf);
@@ -1980,14 +1980,14 @@ void ha_maria::start_bulk_insert(ha_rows rows)
     != 0 Error
 */

-int ha_maria::end_bulk_insert(bool table_will_be_deleted)
+int ha_maria::end_bulk_insert()
 {
   int err;
   DBUG_ENTER("ha_maria::end_bulk_insert");
-  maria_end_bulk_insert(file, table_will_be_deleted);
+  maria_end_bulk_insert(file);
   if ((err= maria_extra(file, HA_EXTRA_NO_CACHE, 0)))
     goto end;
-  if (can_enable_indexes && !table_will_be_deleted)
+  if (can_enable_indexes && !file->s->deleting)
     err= enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
 end:
   if (bulk_insert_single_undo != BULK_INSERT_NONE)
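For Maria (and MyISAM below), the hunk above replaces the removed parameter with the engine's own shared state: ha_maria::end_bulk_insert() now checks file->s->deleting, the drop flag on the table share that all handler instances of the table see. A minimal self-contained model of why that makes the argument redundant; toy_share, toy_file and toy_ha are illustrative stand-ins, not the real MARIA structures:

// Sketch: the "this table is being dropped" state lives on the shared
// table object, so every handler instance can consult it instead of being
// told through an end_bulk_insert() argument.
#include <memory>
#include <cstdio>

struct toy_share {            // models the table share (MARIA_SHARE-like)
  bool deleting= false;       // set when a drop path prepares the table for deletion
};

struct toy_file {             // models the per-handler MARIA_HA-like object
  std::shared_ptr<toy_share> s;
};

struct toy_ha {               // models ha_maria / ha_myisam
  toy_file file;
  bool can_enable_indexes= true;

  int end_bulk_insert() {
    // the real engine flushes its caches here ...
    if (can_enable_indexes && !file.s->deleting)
      std::printf("re-enabling non-unique indexes\n");
    else
      std::printf("skipping index rebuild, table is going away\n");
    return 0;
  }
};

int main() {
  auto share= std::make_shared<toy_share>();
  toy_ha h1{ {share} }, h2{ {share} };
  h1.end_bulk_insert();       // rebuilds indexes
  share->deleting= true;      // e.g. a prepare-for-drop hint was seen
  h2.end_bulk_insert();       // skips the rebuild
  return 0;
}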
@@ -123,7 +123,7 @@ public:
   int enable_indexes(uint mode);
   int indexes_are_disabled(void);
   void start_bulk_insert(ha_rows rows);
-  int end_bulk_insert(bool abort);
+  int end_bulk_insert();
   ha_rows records_in_range(uint inx, key_range * min_key, key_range * max_key);
   void update_create_info(HA_CREATE_INFO * create_info);
   int create(const char *name, TABLE * form, HA_CREATE_INFO * create_info);
@@ -1755,7 +1755,7 @@ void maria_flush_bulk_insert(MARIA_HA *info, uint inx)
   }
 }

-void maria_end_bulk_insert(MARIA_HA *info, my_bool abort)
+void maria_end_bulk_insert(MARIA_HA *info)
 {
   DBUG_ENTER("maria_end_bulk_insert");
   if (info->bulk_insert)
@@ -1765,7 +1765,7 @@ void maria_end_bulk_insert(MARIA_HA *info, my_bool abort)
     {
       if (is_tree_inited(&info->bulk_insert[i]))
       {
-        if (abort)
+        if (info->s->deleting)
          reset_free_element(&info->bulk_insert[i]);
         delete_tree(&info->bulk_insert[i]);
       }
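As far as can be read from the hunk above, the bulk-insert buffer is a per-index tree whose per-element free hook normally writes each buffered key out when the tree is destroyed; when the table is being dropped (info->s->deleting, instead of the old abort argument) the hook is cleared via reset_free_element() first, so delete_tree() just discards the buffer. A toy sketch of that "clear the flush callback before destroying" idea; none of these names are the real TREE API:

// Sketch: a buffer that flushes its contents through a callback on destroy,
// unless the callback has been cleared because the data is about to be
// thrown away anyway.
#include <vector>
#include <functional>
#include <cstdio>

struct toy_key_buffer {
  std::vector<int> keys;
  std::function<void(int)> flush;          // called per key on destroy, if set

  void destroy() {
    if (flush)
      for (int k : keys) flush(k);         // normal path: write keys out
    keys.clear();                          // drop path: just forget them
  }
};

int main() {
  toy_key_buffer normal, dropping;
  normal.flush= [](int k){ std::printf("flush key %d\n", k); };
  dropping.flush= normal.flush;
  normal.keys= {1, 2, 3};
  dropping.keys= {1, 2, 3};

  normal.destroy();                        // flushes 1, 2, 3

  dropping.flush= nullptr;                 // models reset_free_element()
  dropping.destroy();                      // models delete_tree(): nothing written
  return 0;
}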
@@ -1589,11 +1589,11 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
     != 0 Error
 */

-int ha_myisam::end_bulk_insert(bool abort)
+int ha_myisam::end_bulk_insert()
 {
   mi_end_bulk_insert(file);
   int err=mi_extra(file, HA_EXTRA_NO_CACHE, 0);
-  if (!err && !abort)
+  if (!err && !file->s->deleting)
   {
     if (can_enable_indexes)
     {
@@ -107,7 +107,7 @@ class ha_myisam: public handler
   int enable_indexes(uint mode);
   int indexes_are_disabled(void);
   void start_bulk_insert(ha_rows rows);
-  int end_bulk_insert(bool abort);
+  int end_bulk_insert();
   ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
   void update_create_info(HA_CREATE_INFO *create_info);
   int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);