Merge 10.6 into 10.11

This commit is contained in:
Marko Mäkelä 2025-09-24 12:48:56 +03:00
commit 990b44495c
27 changed files with 565 additions and 201 deletions

View file

@ -379,7 +379,15 @@ dict_table_schema_check(
return DB_STATS_DO_NOT_EXIST;
}
if (!table->is_readable() || !table->space) {
if (!table->is_readable()) {
/* table is not readable */
snprintf(errstr, errstr_sz,
"Table %s is not readable.",
req_schema->table_name_sql);
return DB_ERROR;
}
if (!table->space) {
/* missing tablespace */
snprintf(errstr, errstr_sz,
"Tablespace for table %s is missing.",
@ -3714,8 +3722,9 @@ dberr_t dict_stats_rename_table(const char *old_name, const char *new_name,
dict_fs2utf8(old_name, old_db, sizeof old_db, old_table, sizeof old_table);
dict_fs2utf8(new_name, new_db, sizeof new_db, new_table, sizeof new_table);
if (dict_table_t::is_temporary_name(old_name) ||
dict_table_t::is_temporary_name(new_name))
/* Delete the statistics only when renaming the table from the original
table to an intermediate table during the COPY algorithm */
if (dict_table_t::is_temporary_name(new_name))
{
if (dberr_t e= dict_stats_delete_from_table_stats(old_db, old_table, trx))
return e;

View file

@ -15834,16 +15834,17 @@ ha_innobase::extra(
/* Warning: since it is not sure that MariaDB calls external_lock()
before calling this function, m_prebuilt->trx can be obsolete! */
trx_t* trx;
THD* thd = ha_thd();
switch (operation) {
case HA_EXTRA_FLUSH:
(void)check_trx_exists(ha_thd());
(void)check_trx_exists(thd);
if (m_prebuilt->blob_heap) {
row_mysql_prebuilt_free_blob_heap(m_prebuilt);
}
break;
case HA_EXTRA_RESET_STATE:
trx = check_trx_exists(ha_thd());
trx = check_trx_exists(thd);
reset_template();
trx->duplicates = 0;
stmt_boundary:
@ -15852,23 +15853,23 @@ ha_innobase::extra(
trx->bulk_insert &= TRX_DDL_BULK;
break;
case HA_EXTRA_NO_KEYREAD:
(void)check_trx_exists(ha_thd());
(void)check_trx_exists(thd);
m_prebuilt->read_just_key = 0;
break;
case HA_EXTRA_KEYREAD:
(void)check_trx_exists(ha_thd());
(void)check_trx_exists(thd);
m_prebuilt->read_just_key = 1;
break;
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
(void)check_trx_exists(ha_thd());
(void)check_trx_exists(thd);
m_prebuilt->keep_other_fields_on_keyread = 1;
break;
case HA_EXTRA_INSERT_WITH_UPDATE:
trx = check_trx_exists(ha_thd());
trx = check_trx_exists(thd);
trx->duplicates |= TRX_DUP_IGNORE;
goto stmt_boundary;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
trx = check_trx_exists(ha_thd());
trx = check_trx_exists(thd);
trx->duplicates &= ~TRX_DUP_IGNORE;
if (trx->is_bulk_insert()) {
/* Allow a subsequent INSERT into an empty table
@ -15881,11 +15882,11 @@ ha_innobase::extra(
}
goto stmt_boundary;
case HA_EXTRA_WRITE_CAN_REPLACE:
trx = check_trx_exists(ha_thd());
trx = check_trx_exists(thd);
trx->duplicates |= TRX_DUP_REPLACE;
goto stmt_boundary;
case HA_EXTRA_WRITE_CANNOT_REPLACE:
trx = check_trx_exists(ha_thd());
trx = check_trx_exists(thd);
trx->duplicates &= ~TRX_DUP_REPLACE;
if (trx->is_bulk_insert()) {
/* Allow a subsequent INSERT into an empty table
@ -15894,7 +15895,7 @@ ha_innobase::extra(
}
goto stmt_boundary;
case HA_EXTRA_BEGIN_ALTER_COPY:
trx = check_trx_exists(ha_thd());
trx = check_trx_exists(thd);
m_prebuilt->table->skip_alter_undo = 1;
if (m_prebuilt->table->is_temporary()
|| !m_prebuilt->table->versioned_by_id()) {
@ -15907,7 +15908,7 @@ ha_innobase::extra(
.first->second.set_versioned(0);
break;
case HA_EXTRA_END_ALTER_COPY:
trx = check_trx_exists(ha_thd());
trx = check_trx_exists(thd);
if (!m_prebuilt->table->skip_alter_undo) {
/* This could be invoked inside INSERT...SELECT.
We do not want any extra log writes, because
@ -15941,6 +15942,7 @@ ha_innobase::extra(
handler::extra(HA_EXTRA_BEGIN_ALTER_COPY). */
log_buffer_flush_to_disk();
}
alter_stats_rebuild(m_prebuilt->table, thd);
break;
case HA_EXTRA_ABORT_ALTER_COPY:
if (m_prebuilt->table->skip_alter_undo) {
@ -21335,3 +21337,25 @@ void ins_node_t::vers_update_end(row_prebuilt_t *prebuilt, bool history_row)
if (UNIV_LIKELY_NULL(local_heap))
mem_heap_free(local_heap);
}
/** Adjust the persistent statistics after rebuilding ALTER TABLE.
Remove statistics for dropped indexes, add statistics for created indexes
and rename statistics for renamed indexes.
@param table InnoDB table that was rebuilt by ALTER TABLE
@param thd alter table thread */
void alter_stats_rebuild(dict_table_t *table, THD *thd)
{
DBUG_ENTER("alter_stats_rebuild");
/* Nothing to do when the table has no tablespace, persistent
statistics are not enabled for it, or the persistent statistics
storage does not pass the schema check. */
if (!table->space || !table->stats_is_persistent()
|| dict_stats_persistent_storage_check(false) != SCHEMA_OK)
DBUG_VOID_RETURN;
/* Recompute the persistent statistics for the rebuilt table... */
dberr_t ret= dict_stats_update_persistent(table);
if (ret == DB_SUCCESS)
/* ...and save them to the persistent statistics storage. */
ret= dict_stats_save(table);
if (ret != DB_SUCCESS)
/* A statistics failure is reported to the client as a warning,
not as an error. */
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_ALTER_INFO, "Error updating stats for table after"
" table rebuild: %s", ut_strerr(ret));
DBUG_VOID_RETURN;
}

View file

@ -914,3 +914,10 @@ ib_push_frm_error(
@return true if index column length exceeds limit */
MY_ATTRIBUTE((warn_unused_result))
bool too_big_key_part_length(size_t max_field_len, const KEY& key);
/** Adjust the persistent statistics after rebuilding ALTER TABLE.
Remove statistics for dropped indexes, add statistics for created indexes
and rename statistics for renamed indexes.
@param table InnoDB table that was rebuilt by ALTER TABLE
@param thd alter table thread */
void alter_stats_rebuild(dict_table_t *table, THD *thd);

View file

@ -11199,7 +11199,7 @@ Remove statistics for dropped indexes, add statistics for created indexes
and rename statistics for renamed indexes.
@param ha_alter_info Data used during in-place alter
@param ctx In-place ALTER TABLE context
@param thd MySQL connection
@param thd alter table thread
*/
static
void
@ -11231,46 +11231,6 @@ alter_stats_norebuild(
DBUG_VOID_RETURN;
}
/** Adjust the persistent statistics after rebuilding ALTER TABLE.
Remove statistics for dropped indexes, add statistics for created indexes
and rename statistics for renamed indexes.
@param table InnoDB table that was rebuilt by ALTER TABLE
@param table_name Table name in MySQL
@param thd MySQL connection
*/
static
void
alter_stats_rebuild(
/*================*/
dict_table_t* table,
const char* table_name,
THD* thd)
{
DBUG_ENTER("alter_stats_rebuild");
/* Nothing to do when the table has no tablespace, persistent
statistics are not enabled for it, or the persistent statistics
storage does not pass the schema check. */
if (!table->space || !table->stats_is_persistent()
|| dict_stats_persistent_storage_check(false) != SCHEMA_OK) {
DBUG_VOID_RETURN;
}
/* Recompute the persistent statistics and save them. */
dberr_t ret = dict_stats_update_persistent(table);
if (ret == DB_SUCCESS) {
ret = dict_stats_save(table);
}
if (ret != DB_SUCCESS) {
/* A statistics failure is reported to the client as a
warning, not as an error. */
push_warning_printf(
thd,
Sql_condition::WARN_LEVEL_WARN,
ER_ALTER_INFO,
"Error updating stats for table '%s'"
" after table rebuild: %s",
table_name, ut_strerr(ret));
}
DBUG_VOID_RETURN;
}
/** Apply the log for the table rebuild operation.
@param[in] ctx Inplace Alter table context
@param[in] altered_table MySQL table that is being altered
@ -11941,9 +11901,7 @@ foreign_fail:
(*pctx);
DBUG_ASSERT(ctx->need_rebuild());
alter_stats_rebuild(
ctx->new_table, table->s->table_name.str,
m_user_thd);
alter_stats_rebuild(ctx->new_table, m_user_thd);
}
} else {
for (inplace_alter_handler_ctx** pctx = ctx_array;

View file

@ -2691,9 +2691,8 @@ all_done:
ut_ad((mrec == NULL) == (index->online_log->head.bytes == 0));
#ifdef UNIV_DEBUG
if (index->online_log->head.block &&
next_mrec_end == index->online_log->head.block
+ srv_sort_buf_size) {
if (next_mrec_end - srv_sort_buf_size
== index->online_log->head.block) {
/* If tail.bytes == 0, next_mrec_end can also be at
the end of tail.block. */
if (index->online_log->tail.bytes == 0) {
@ -2707,9 +2706,8 @@ all_done:
ut_ad(index->online_log->tail.blocks
> index->online_log->head.blocks);
}
} else if (index->online_log->tail.block &&
next_mrec_end == index->online_log->tail.block
+ index->online_log->tail.bytes) {
} else if (next_mrec_end - index->online_log->tail.bytes
== index->online_log->tail.block) {
ut_ad(next_mrec == index->online_log->tail.block
+ index->online_log->head.bytes);
ut_ad(index->online_log->tail.blocks == 0);
@ -2810,7 +2808,7 @@ process_next_block:
} else {
memcpy(index->online_log->head.buf, mrec,
ulint(mrec_end - mrec));
mrec_end += ulint(index->online_log->head.buf - mrec);
mrec_end -= ulint(mrec - index->online_log->head.buf);
mrec = index->online_log->head.buf;
goto process_next_block;
}
@ -3603,8 +3601,8 @@ all_done:
ut_ad((mrec == NULL) == (index->online_log->head.bytes == 0));
#ifdef UNIV_DEBUG
if (next_mrec_end == index->online_log->head.block
+ srv_sort_buf_size) {
if (next_mrec_end - srv_sort_buf_size
== index->online_log->head.block) {
/* If tail.bytes == 0, next_mrec_end can also be at
the end of tail.block. */
if (index->online_log->tail.bytes == 0) {
@ -3618,8 +3616,8 @@ all_done:
ut_ad(index->online_log->tail.blocks
> index->online_log->head.blocks);
}
} else if (next_mrec_end == index->online_log->tail.block
+ index->online_log->tail.bytes) {
} else if (next_mrec_end - index->online_log->tail.bytes
== index->online_log->tail.block) {
ut_ad(next_mrec == index->online_log->tail.block
+ index->online_log->head.bytes);
ut_ad(index->online_log->tail.blocks == 0);
@ -3702,7 +3700,7 @@ process_next_block:
} else {
memcpy(index->online_log->head.buf, mrec,
ulint(mrec_end - mrec));
mrec_end += ulint(index->online_log->head.buf - mrec);
mrec_end -= ulint(mrec - index->online_log->head.buf);
mrec = index->online_log->head.buf;
goto process_next_block;
}