MDEV-31835 Remove unnecessary extra HA_EXTRA_IGNORE_INSERT call

- This commit is different from the 10.6 commit c438284863.
Since commit 045757af4c (MDEV-24621), InnoDB buffers and pre-sorts
the records for each index, and builds the indexes one page at a time.

Multiple large INSERT IGNORE statements abort the server during the
bulk insert operation. The problem is that an InnoDB merge record
exceeds the page size. To avoid this scenario, InnoDB should catch
the too-big record while buffering the insert operation itself.

row_merge_buf_encode(): Return the length of the encoded index record.

row_merge_buf_write(): Catch DB_TOO_BIG_RECORD earlier, while
buffering, and return the error.
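
The change can be pictured with a small stand-alone sketch: the encoder
reports the size of every record it encodes, and the buffering step refuses
a record that could never fit on an index page instead of letting the merge
phase hit it later. Everything below (encode_record, buffer_records,
PAGE_SIZE, Status) is a hypothetical illustration, not InnoDB code; the
real check in row_merge_buf_write() additionally applies only when the
tuples carry externally stored columns (blob_file != nullptr).

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

enum class Status { OK, TOO_BIG_RECORD };

// Stand-in for srv_page_size (16K is the InnoDB default).
constexpr std::size_t PAGE_SIZE = 16384;

// Encode one record into *out and advance the write pointer; return the
// encoded size (mirrors row_merge_buf_encode() now returning ulint
// instead of void).
static std::size_t encode_record(std::uint8_t*& out,
                                 const std::vector<std::uint8_t>& rec)
{
  std::memcpy(out, rec.data(), rec.size());
  out += rec.size();
  return rec.size();
}

// Buffer a batch of records, failing as soon as one encodes larger than
// a page (mirrors the new early check in row_merge_buf_write()).
static Status buffer_records(const std::vector<std::vector<std::uint8_t>>& recs,
                             std::vector<std::uint8_t>& block)
{
  std::uint8_t* b = block.data();
  for (const auto& rec : recs) {
    if (encode_record(b, rec) > PAGE_SIZE)
      return Status::TOO_BIG_RECORD;  // caught while buffering
  }
  return Status::OK;
}

int main()
{
  std::vector<std::uint8_t> block(3 * PAGE_SIZE);
  std::vector<std::vector<std::uint8_t>> recs;
  recs.emplace_back(512);            // fits comfortably
  recs.emplace_back(PAGE_SIZE + 1);  // rejected before the merge phase
  std::cout << (buffer_records(recs, block) == Status::TOO_BIG_RECORD
                    ? "DB_TOO_BIG_RECORD\n"
                    : "OK\n");
}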
Thirunarayanan Balathandayuthapani 2023-08-25 17:25:47 +05:30
parent afc64eacc9
commit bf3b787e02
12 changed files with 33 additions and 25 deletions

View file

@@ -218,9 +218,7 @@ enum ha_extra_function {
   /** Start writing rows during ALTER TABLE...ALGORITHM=COPY. */
   HA_EXTRA_BEGIN_ALTER_COPY,
   /** Finish writing rows during ALTER TABLE...ALGORITHM=COPY. */
-  HA_EXTRA_END_ALTER_COPY,
-  /** IGNORE is being used for the insert statement */
-  HA_EXTRA_IGNORE_INSERT
+  HA_EXTRA_END_ALTER_COPY
 };
 
 /* Compatible option, to be deleted in 6.0 */

View file

@@ -0,0 +1,2 @@
+423a424
+> ERROR 42000: Row size too large (> 1982). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline.

View file

@@ -435,6 +435,14 @@ CREATE TABLE t1 (pk int primary key, c01 text, c02 text, c03 text,
 SET GLOBAL INNODB_DEFAULT_ROW_FORMAT= COMPACT;
 --replace_result 1982 8126 4030 8126
 ALTER TABLE t1 FORCE;
+let $page_size= `SELECT @@innodb_page_size`;
+let $error_code = 0;
+if ($page_size == 4096) {
+  let $error_code = ER_TOO_BIG_ROWSIZE;
+}
+--error $error_code
 INSERT IGNORE INTO t1 VALUES
 (1, REPEAT('x',4805), REPEAT('t',2211), REPEAT('u',974), REPEAT('e',871), REPEAT('z',224), REPEAT('j',978), REPEAT('n',190), REPEAT('t',888), REPEAT('x',32768), REPEAT('e',968), REPEAT('b',913), REPEAT('x',12107)),
 (2, REPEAT('x',4805), REPEAT('t',2211), REPEAT('u',974), REPEAT('e',871), REPEAT('z',224), REPEAT('j',978), REPEAT('n',190), REPEAT('t',888), REPEAT('x',32768), REPEAT('e',968), REPEAT('b',913), REPEAT('x',12107));

View file

@@ -9479,7 +9479,6 @@ int ha_partition::extra(enum ha_extra_function operation)
   case HA_EXTRA_STARTING_ORDERED_INDEX_SCAN:
   case HA_EXTRA_BEGIN_ALTER_COPY:
   case HA_EXTRA_END_ALTER_COPY:
-  case HA_EXTRA_IGNORE_INSERT:
     DBUG_RETURN(loop_partitions(extra_cb, &operation));
   default:
   {

View file

@@ -2210,9 +2210,6 @@ int write_record(THD *thd, TABLE *table, COPY_INFO *info, select_result *sink)
       goto after_trg_or_ignored_err;
   }
 
-  /* Notify the engine about insert ignore operation */
-  if (info->handle_duplicates == DUP_ERROR && info->ignore)
-    table->file->extra(HA_EXTRA_IGNORE_INSERT);
 after_trg_n_copied_inc:
   info->copied++;
   thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);

View file

@@ -11841,9 +11841,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
     }
     else
     {
-      /* In case of alter ignore, notify the engine about it. */
-      if (ignore)
-        to->file->extra(HA_EXTRA_IGNORE_INSERT);
       DEBUG_SYNC(thd, "copy_data_between_tables_before");
       found_count++;
       mysql_stage_set_work_completed(thd->m_stage_progress_psi, found_count);

View file

@@ -10088,7 +10088,6 @@ bool TR_table::update(ulonglong start_id, ulonglong end_id)
     table->file->print_error(error, MYF(0));
   /* extra() is used to apply the bulk insert operation
   on mysql/transaction_registry table */
-  table->file->extra(HA_EXTRA_IGNORE_INSERT);
 
   return error;
 }

View file

@@ -15679,13 +15679,6 @@ ha_innobase::extra(
   case HA_EXTRA_RESET_STATE:
     reset_template();
     trx->duplicates = 0;
-    /* fall through */
-  case HA_EXTRA_IGNORE_INSERT:
-    /* HA_EXTRA_IGNORE_INSERT is very similar to
-    HA_EXTRA_IGNORE_DUP_KEY, but with one crucial difference:
-    we want !trx->duplicates for INSERT IGNORE so that
-    row_ins_duplicate_error_in_clust() will acquire a
-    shared lock instead of an exclusive lock. */
   stmt_boundary:
     trx->bulk_insert_apply();
     trx->end_bulk_insert(*m_prebuilt->table);

View file

@@ -532,6 +532,13 @@ public:
   {
     return bulk_store && is_bulk_insert();
   }
+
+  /** Free bulk insert operation */
+  void clear_bulk_buffer()
+  {
+    delete bulk_store;
+    bulk_store= nullptr;
+  }
 };
 
 /** Collection of persistent tables and their first modification

View file

@@ -281,10 +281,10 @@ row_merge_insert_index_tuples(
   ut_stage_alter_t* stage= nullptr,
   merge_file_t* blob_file= nullptr);
 
-/******************************************************//**
-Encode an index record. */
+/** Encode an index record.
+@return size of the record */
 static MY_ATTRIBUTE((nonnull))
-void
+ulint
 row_merge_buf_encode(
 /*=================*/
   byte** b, /*!< in/out: pointer to
@@ -315,6 +315,7 @@ row_merge_buf_encode(
     entry->fields, n_fields);
 
   *b += size;
+  return size;
 }
 
 static MY_ATTRIBUTE((malloc, nonnull))
@@ -1175,7 +1176,13 @@ dberr_t row_merge_buf_write(const row_merge_buf_t *buf,
     }
   }
 
-  row_merge_buf_encode(&b, index, entry, n_fields);
+  ulint rec_size= row_merge_buf_encode(
+    &b, index, entry, n_fields);
+  if (blob_file && rec_size > srv_page_size) {
+    err = DB_TOO_BIG_RECORD;
+    goto func_exit;
+  }
+
   ut_ad(b < &block[srv_sort_buf_size]);
 
   DBUG_LOG("ib_merge_sort",
@@ -5390,6 +5397,7 @@ bulk_rollback:
       if (t.second.get_first() < low_limit)
         low_limit= t.second.get_first();
       delete t.second.bulk_store;
+      t.second.bulk_store= nullptr;
     }
   }
   trx_savept_t bulk_save{low_limit};

View file

@@ -146,7 +146,10 @@ inline void trx_t::rollback_low(trx_savept_t *savept)
     trx_mod_tables_t::iterator j= i++;
     ut_ad(j->second.valid());
     if (j->second.rollback(limit))
+    {
+      j->second.clear_bulk_buffer();
       mod_tables.erase(j);
+    }
     else if (!apply_online_log)
       apply_online_log= j->first->is_active_ddl();
   }

View file

@@ -596,9 +596,6 @@ static const char *mrn_inspect_extra_function(enum ha_extra_function operation)
     inspected = "HA_EXTRA_NO_AUTOINC_LOCKING";
     break;
 #endif
-  case HA_EXTRA_IGNORE_INSERT:
-    inspected = "HA_EXTRA_IGNORE_INSERT";
-    break;
   }
   return inspected;
 }