For performance reasons we remove the ability, on unique indexes over autoincrement columns, to insert a key lower than the current autoincrement value.

mysql-test/r/archive.result:
  Modified error output
mysql-test/t/archive.test:
  Fix for the change in behavior
storage/archive/ha_archive.cc:
  ifdef of the search record code
This commit is contained in:
unknown 2007-01-26 14:17:45 -08:00
parent f69cfbf857
commit c95bc8dcdb
3 changed files with 7 additions and 6 deletions

View file

@ -12367,6 +12367,7 @@ INSERT INTO t5 VALUES (NULL, "foo");
INSERT INTO t5 VALUES (NULL, "foo");
INSERT INTO t5 VALUES (32, "foo");
INSERT INTO t5 VALUES (23, "foo");
ERROR 23000: Can't write; duplicate key in table 't5'
INSERT INTO t5 VALUES (NULL, "foo");
INSERT INTO t5 VALUES (NULL, "foo");
INSERT INTO t5 VALUES (3, "foo");
@ -12380,7 +12381,6 @@ a b
4 foo
5 foo
32 foo
23 foo
33 foo
34 foo
35 foo

View file

@ -1379,6 +1379,7 @@ INSERT INTO t5 VALUES (NULL, "foo");
INSERT INTO t5 VALUES (NULL, "foo");
INSERT INTO t5 VALUES (NULL, "foo");
INSERT INTO t5 VALUES (32, "foo");
--error 1022
INSERT INTO t5 VALUES (23, "foo");
INSERT INTO t5 VALUES (NULL, "foo");
INSERT INTO t5 VALUES (NULL, "foo");

View file

@ -784,23 +784,22 @@ int ha_archive::write_row(byte *buf)
temp_auto= table->next_number_field->val_int();
/*
Simple optimization to see if we fail for duplicate key immediately
because we have just given out this value.
We don't support decrementing auto_increment. They make the performance
just cry.
*/
if (temp_auto == share->archive_write.auto_increment &&
if (temp_auto <= share->archive_write.auto_increment &&
mkey->flags & HA_NOSAME)
{
rc= HA_ERR_FOUND_DUPP_KEY;
goto error;
}
#ifdef DEAD_CODE
/*
Bad news, this will cause a search for the unique value which is very
expensive since we will have to do a table scan which will lock up
all other writers during this period. This could perhaps be optimized
in the future.
*/
if (temp_auto < share->archive_write.auto_increment &&
mkey->flags & HA_NOSAME)
{
/*
First we create a buffer that we can use for reading rows, and can pass
@ -838,6 +837,7 @@ int ha_archive::write_row(byte *buf)
}
}
}
#endif
else
{
if (temp_auto > share->archive_write.auto_increment)