MDEV-25654 Unexpected ER_CRASHED_ON_USAGE and Assertion `limit >= trx_id' failed in purge_node_t::skip

For fast alter partition, ALTER lost the hash fields in the frm field count.

mysql_prepare_create_table() did not call add_hash_field(), because the logic of ALTER-ing field types implies automatic promotion/demotion to/from a hash index. So we do not pass the hash algorithm to mysql_prepare_create_table() and let it decide by itself, but it cannot decide correctly for fast alter partition.

mysql_prepare_alter_table() is now a bit more sophisticated about which algorithm to pass: if no fields were changed, it forces mysql_prepare_create_table() to re-add the hash fields by passing HA_KEY_ALG_HASH.

The problem with the original logic is that mysql_prepare_alter_table() does not fully own the hash property, so the decision is split between mysql_prepare_alter_table() and mysql_prepare_create_table().
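In essence, the change is one conditional in mysql_prepare_alter_table(). The sketch below condenses it using only the names that appear in the sql_table.cc hunk further down; the surrounding per-key loop and declarations are omitted:

    /* For a long unique key (HA_KEY_ALG_LONG_HASH): if the ALTER changed
       some column, fall back to HA_KEY_ALG_UNDEF and let
       mysql_prepare_create_table() re-decide from the new column types;
       if no column changed (e.g. fast ALTER ... ADD PARTITION), force
       HA_KEY_ALG_HASH so that mysql_prepare_create_table() calls
       add_hash_field() again and the hidden hash field is not lost. */
    if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
      key_info->algorithm= (alter_info->flags & ALTER_CHANGE_COLUMN) ?
                           HA_KEY_ALG_UNDEF : HA_KEY_ALG_HASH;
    key_create_info.algorithm= key_info->algorithm;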
commit 0dcd30197a
parent 0cf2176b79
3 changed files with 41 additions and 1 deletion
@@ -729,4 +729,16 @@ alter table t1 enable keys;
 insert into t1 values (2);
 ERROR 23000: Duplicate entry '2' for key 'i'
 drop table t1;
+#
+# MDEV-25654 Unexpected ER_CRASHED_ON_USAGE and Assertion `limit >= trx_id' failed in purge_node_t::skip
+#
+create table t1 (a int, unique using hash (a)) engine=innodb
+partition by range(a) (
+partition p1 values less than (2),
+partition p2 values less than (101)
+);
+insert into t1 select seq from seq_1_to_100;
+alter table t1 add partition (partition p3 values less than (maxvalue));
+alter table t1 force;
+drop table t1;
 # End of 10.5 tests
@@ -706,4 +706,19 @@ alter table t1 enable keys;
 insert into t1 values (2);
 drop table t1;
 
+--echo #
+--echo # MDEV-25654 Unexpected ER_CRASHED_ON_USAGE and Assertion `limit >= trx_id' failed in purge_node_t::skip
+--echo #
+create table t1 (a int, unique using hash (a)) engine=innodb
+partition by range(a) (
+partition p1 values less than (2),
+partition p2 values less than (101)
+);
+insert into t1 select seq from seq_1_to_100;
+
+alter table t1 add partition (partition p3 values less than (maxvalue));
+alter table t1 force;
+
+drop table t1;
+
 --echo # End of 10.5 tests
@@ -9264,7 +9264,20 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
     LEX_CSTRING tmp_name;
     bzero((char*) &key_create_info, sizeof(key_create_info));
     if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
-      key_info->algorithm= HA_KEY_ALG_UNDEF;
+      key_info->algorithm= (alter_info->flags & ALTER_CHANGE_COLUMN) ?
+                           HA_KEY_ALG_UNDEF : HA_KEY_ALG_HASH;
+    /*
+      This one goes to mysql_prepare_create_table():
+
+        key_info->algorithm= key->key_create_info.algorithm;
+
+      For HA_KEY_ALG_LONG_HASH, if we did not change ANY column, we pass
+      HA_KEY_ALG_HASH to ensure mysql_prepare_create_table() does add_hash_field().
+      This protects fast alter partition from losing the hash properties.
+      In case of any column changes we drop the algorithm to HA_KEY_ALG_UNDEF and
+      let mysql_prepare_create_table() decide whether the hash field is needed
+      depending on the new types.
+    */
     key_create_info.algorithm= key_info->algorithm;
     /*
       We copy block size directly as some engines, like Archive, sets this