mirror of
https://github.com/MariaDB/server.git
synced 2025-02-15 01:45:33 +01:00
MDEV-21540 Initialization of already inited long unique index on reorganize partition
The handler for the existing partition was already index-inited at the beginning of copy_partitions(). In the case of REORGANIZE PARTITION we fill the new partition by calling its ha_write_row() (the handler is the storage engine of the new partition). From there we go through the below conditions: if (this->inited == RND) table->clone_handler_for_update(); handler *h= table->update_handler ? table->update_handler : table->file; First, the above misses the meaning of the this->inited check. Here it is a new partition and this handler is not inited. So, we assign table->file, which is ha_partition and is really not known to be inited or not. It is supposed that (this == table->file); otherwise we are outside the logic for using update_handler. This patch adds a DBUG_ASSERT for that. Second, we call check_duplicate_long_entries() for table->file, and that calls ha_partition::index_init(), which calls index_init() for each partition's handler. But the existing partitions' handlers were already inited in copy_partitions() and we fail on an assertion. The fix implies that we don't need check_duplicate_long_entries() per partition, as we've already done check_duplicate_long_entries() for ha_partition. For REORGANIZE PARTITION that means the existing row was already checked at previous INSERT/UPDATE commands, so there is no need to check it again (see NOTE in handler::ha_write_row()). The fix also optimizes ha_update_row() so check_duplicate_long_entries_update() is not called per partition, considering it was already called for ha_partition. Besides, the per-partition duplicate check is not really usable.
This commit is contained in:
parent
f0107c90a0
commit
231feabd2b
3 changed files with 69 additions and 3 deletions
|
@ -315,5 +315,24 @@ update t1,t2 set v1 = v2 , v5 = 0;
|
|||
ERROR 23000: Duplicate entry '-128' for key 'v1'
|
||||
drop table t1, t2;
|
||||
#
|
||||
# MDEV-21540 Initialization of already inited long unique index on reorganize partition
|
||||
#
|
||||
create table t1 (x int, a blob)
|
||||
partition by range (x) (
|
||||
partition p1 values less than (50),
|
||||
partition pn values less than maxvalue);
|
||||
insert into t1 values (1, 1), (100, 1);
|
||||
alter table t1 add unique key (a);
|
||||
ERROR 23000: Duplicate entry '1' for key 'a'
|
||||
update t1 set a= x;
|
||||
alter table t1 add unique key (a);
|
||||
update t1 set a= 1;
|
||||
ERROR 23000: Duplicate entry '1' for key 'a'
|
||||
update t1 set a= x + 1;
|
||||
alter table t1 reorganize partition p1 into (
|
||||
partition n0 values less than (10),
|
||||
partition n1 values less than (50));
|
||||
drop table t1;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
|
|
|
@ -396,6 +396,33 @@ update t1 set v2 = 1, v3 = -128;
|
|||
update t1,t2 set v1 = v2 , v5 = 0;
|
||||
drop table t1, t2;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-21540 Initialization of already inited long unique index on reorganize partition
|
||||
--echo #
|
||||
create table t1 (x int, a blob)
|
||||
partition by range (x) (
|
||||
partition p1 values less than (50),
|
||||
partition pn values less than maxvalue);
|
||||
|
||||
insert into t1 values (1, 1), (100, 1);
|
||||
|
||||
# a little bit of additional checks
|
||||
--error ER_DUP_ENTRY
|
||||
alter table t1 add unique key (a);
|
||||
|
||||
update t1 set a= x;
|
||||
alter table t1 add unique key (a);
|
||||
--error ER_DUP_ENTRY
|
||||
update t1 set a= 1;
|
||||
update t1 set a= x + 1;
|
||||
|
||||
# bug failure
|
||||
alter table t1 reorganize partition p1 into (
|
||||
partition n0 values less than (10),
|
||||
partition n1 values less than (50));
|
||||
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
|
|
@ -6782,7 +6782,18 @@ int handler::ha_write_row(const uchar *buf)
|
|||
mark_trx_read_write();
|
||||
increment_statistics(&SSV::ha_write_count);
|
||||
|
||||
if (table->s->long_unique_table)
|
||||
/*
|
||||
NOTE: this != table->file is true in 3 cases:
|
||||
|
||||
1. under copy_partitions() (REORGANIZE PARTITION): that does not
|
||||
require long unique check as it does not introduce new rows or new index.
|
||||
2. under partition's ha_write_row() (INSERT): check_duplicate_long_entries()
|
||||
was already done by ha_partition::ha_write_row(), no need to check it
|
||||
again for each single partition.
|
||||
3. under ha_mroonga::wrapper_write_row()
|
||||
*/
|
||||
|
||||
if (table->s->long_unique_table && this == table->file)
|
||||
{
|
||||
if (this->inited == RND)
|
||||
table->clone_handler_for_update();
|
||||
|
@ -6830,8 +6841,17 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
|
|||
MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
|
||||
mark_trx_read_write();
|
||||
increment_statistics(&SSV::ha_update_count);
|
||||
if (table->s->long_unique_table &&
|
||||
(error= check_duplicate_long_entries_update(table, table->file, (uchar *)new_data)))
|
||||
|
||||
/*
|
||||
NOTE: this != table->file is true under partition's ha_update_row():
|
||||
check_duplicate_long_entries_update() was already done by
|
||||
ha_partition::ha_update_row(), no need to check it again for each single
|
||||
partition. Same applies to ha_mroonga wrapper.
|
||||
*/
|
||||
|
||||
if (table->s->long_unique_table && this == table->file &&
|
||||
(error= check_duplicate_long_entries_update(table, table->file,
|
||||
(uchar *)new_data)))
|
||||
{
|
||||
return error;
|
||||
}
|
||||
|
|
Loading…
Add table
Reference in a new issue