mirror of
https://github.com/MariaDB/server.git
synced 2026-05-07 23:54:31 +02:00
MDEV-37296 ALTER TABLE allows adding unique hash key with duplicate values
Problem: ======= - During the copy algorithm, InnoDB fails to detect the duplicate key error for a unique hash key blob index. A unique HASH index is treated as a virtual index inside InnoDB. When a table has a unique hash key, the server searches on the hash key before doing any insert operation and finds the duplicate value in check_duplicate_long_entry_key(). Bulk insert performs all the inserts together once the copy of the intermediate table is finished. This leads to the duplicate key error going undetected while building the index. Solution: ======== - Avoid the bulk insert operation when the table has a unique hash key blob index. dict_table_t::can_bulk_insert(): Checks whether the table is eligible for the bulk insert operation during the ALTER copy algorithm. Checks whether any virtual column name starts with DB_ROW_HASH_ to determine whether a blob column has a unique index on it.
This commit is contained in:
parent
8c3f6a1b85
commit
e46c9a0152
4 changed files with 55 additions and 6 deletions
|
|
@ -2171,6 +2171,11 @@ struct dict_table_t {
|
|||
(as part of rolling back TRUNCATE) */
|
||||
dberr_t rename_tablespace(span<const char> new_name, bool replace) const;
|
||||
|
||||
/** Whether the table is eligible for a bulk insert operation
|
||||
@param trx transaction which tries to do bulk insert
|
||||
@retval true if table can do bulk insert
|
||||
@retval false otherwise */
|
||||
bool can_bulk_insert(const trx_t &trx) const noexcept;
|
||||
private:
|
||||
/** Initialize instant->field_map.
|
||||
@param[in] table table definition to copy from */
|
||||
|
|
|
|||
|
|
@ -2619,6 +2619,21 @@ static uint64_t row_parse_int(const byte *data, size_t len,
|
|||
return 0;
|
||||
}
|
||||
|
||||
inline bool dict_table_t::can_bulk_insert(const trx_t &trx) const noexcept
|
||||
{
|
||||
if (is_temporary() || versioned() || has_spatial_index())
|
||||
return false;
|
||||
/* Bulk insert is not compatible with HA_CHECK_UNIQUE_AFTER_WRITE.
|
||||
Refuse bulk insert if HA_KEY_ALG_LONG_HASH indexes exist.
|
||||
handler::ha_check_long_uniques() assumes that all data
|
||||
passed to ha_innobase::write_row() is available immediately. */
|
||||
if (const char *s= v_col_names)
|
||||
for (auto n= n_v_cols; n--; s+= strlen(s) + 1)
|
||||
if (!strncmp(s, C_STRING_WITH_LEN("DB_ROW_HASH_")))
|
||||
return false; /* make_long_hash_field_name() */
|
||||
return !trx.check_foreigns || (foreign_set.empty() && referenced_set.empty());
|
||||
}
|
||||
|
||||
/***************************************************************//**
|
||||
Tries to insert an entry into a clustered index, ignoring foreign key
|
||||
constraints. If a record with the same unique key is found, the other
|
||||
|
|
@ -2824,12 +2839,7 @@ avoid_bulk:
|
|||
/* If foreign key exist and foreign key is enabled
|
||||
then avoid using bulk insert for copy algorithm */
|
||||
if (innodb_alter_copy_bulk
|
||||
&& !index->table->is_temporary()
|
||||
&& !index->table->versioned()
|
||||
&& !index->table->has_spatial_index()
|
||||
&& (!trx->check_foreigns
|
||||
|| (index->table->foreign_set.empty()
|
||||
&& index->table->referenced_set.empty()))) {
|
||||
&& index->table->can_bulk_insert(*trx)) {
|
||||
ut_ad(page_is_empty(block->page.frame));
|
||||
/* This code path has been executed at the
|
||||
start of the alter operation. Consecutive
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue