Mirror of https://github.com/MariaDB/server.git
MDEV-371 Unique Index for long columns
This patch implements an engine-independent unique hash index.

Usage: a unique HASH index is created automatically for a blob/varchar/text
column whose key length > handler->max_key_length(), or it can be requested
explicitly.

Automatic creation:  CREATE TABLE t1 (a BLOB UNIQUE);
Explicit creation:   CREATE TABLE t1 (a INT, UNIQUE(a) USING HASH);

Internal KEY_PART representations:
A long unique key_info has 2 representations. (Let's walk through this with an
example: create table t1(a blob, b blob, unique(a, b));)

1. User-given representation: the key_info->key_part array matches what the
   user defined, so in the example it has 2 key_parts (a, b).

2. Storage engine representation: there is only one key_part and it points to
   the HASH_FIELD. This key_part is always placed after the user-defined
   key_parts. So:

   User-given representation       [a] [b] [hash_key_part]
       key_info->key_part ----------^
   Storage engine representation   [a] [b] [hash_key_part]
       key_info->key_part ------------------------^

table->s->key_info holds the user-given representation, while table->key_info
holds the storage engine representation. One representation can be turned into
the other by calling the re/setup_keyinfo_hash functions.

Working:

1. When the user specifies a HASH index, or the key length is >
   handler->max_key_length(), mysql_prepare_create_table adds one extra vfield
   (for each long unique key) and sets key_info->algorithm to
   HA_KEY_ALG_LONG_HASH.
2. In init_from_binary_frm_image the values for the hash key_part are set
   (fieldnr, field and flags).
3. In parse_vcol_defs, HASH_FIELD->vcol_info is created. Item_func_hash is used
   with a list of Item_fields; when an explicit prefix length is given by the
   user, Item_left is used to cut each Item_field value down to that length.
4. In ha_write_row/ha_update_row, check_duplicate_long_entry_key is called; it
   computes the hash key from table->record[0] and calls ha_index_read_map. If
   a duplicate hash is found, the rows are compared field by field.
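For a quick illustration, here is a minimal session sketch of the user-visible
behaviour described above (condensed from the new test files in this commit;
the table and sample values are arbitrary):

  # a blob key exceeds handler->max_key_length(), so a long unique HASH index is created
  create table t1 (a blob unique);
  show create table t1;            # shows: UNIQUE KEY `a` (`a`) USING HASH
  insert into t1 values ('first'),('second');
  --error ER_DUP_ENTRY
  insert into t1 values ('first'); # duplicate detected via the hidden hash key
  # the generated hash column is internal and not visible to users
  --error ER_BAD_FIELD_ERROR
  select db_row_hash_1 from t1;
  drop table t1;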
parent 5b4d6595d2
commit d00f19e832
46 changed files with 4118 additions and 98 deletions
@@ -99,7 +99,8 @@ enum ha_key_alg {
   HA_KEY_ALG_BTREE=	1,		/* B-tree, default one          */
   HA_KEY_ALG_RTREE=	2,		/* R-tree, for spatial searches */
   HA_KEY_ALG_HASH=	3,		/* HASH keys (HEAP tables)      */
-  HA_KEY_ALG_FULLTEXT=	4		/* FULLTEXT (MyISAM tables)     */
+  HA_KEY_ALG_FULLTEXT=	4,		/* FULLTEXT (MyISAM tables)     */
+  HA_KEY_ALG_LONG_HASH= 5		/* long BLOB keys               */
 };

 /* Storage media types */
@@ -203,6 +203,8 @@ enum enum_indicator_type
 #define VERS_UPDATE_UNVERSIONED_FLAG (1 << 29) /* column that doesn't support
                                                   system versioning when table
                                                   itself supports it */
+#define LONG_UNIQUE_HASH_FIELD (1 << 30)       /* This field will store hash for unique
+                                                  column */

 #define REFRESH_GRANT   (1ULL << 0)  /* Refresh grant tables */
 #define REFRESH_LOG     (1ULL << 1)  /* Start on new log file */
mysql-test/main/long_unique.result (new file, 1408 lines)
File diff suppressed because it is too large
mysql-test/main/long_unique.test (new file, 511 lines)
@@ -0,0 +1,511 @@
let datadir=`select @@datadir`;
--source include/have_partition.inc

--echo #Structure of tests
--echo #First we will check all option for
--echo #table containing single unique column
--echo #table containing keys like unique(a,b,c,d) etc
--echo #then table containing 2 blob unique etc
set @allowed_packet= @@max_allowed_packet;
--echo #table with single long blob column;
create table t1(a blob unique );
insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
--echo #blob with primary key not allowed
--error ER_TOO_LONG_KEY
create table t2(a blob,primary key(a(10000)));
--error ER_TOO_LONG_KEY
create table t3(a varchar(10000) primary key);

--error ER_DUP_ENTRY
insert into t1 values(2);
--echo #table structure;
desc t1;
show create table t1;
query_vertical show keys from t1;
replace_result $datadir DATADIR;
exec $MYISAMCHK -d $datadir/test/t1;
select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
query_vertical select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
--echo # table select we should not be able to see db_row_hash_column;
select * from t1 order by a;
--error ER_BAD_FIELD_ERROR
select db_row_hash_1 from t1;
--echo #duplicate entry test;
--error ER_DUP_ENTRY
insert into t1 values(2);
--error ER_DUP_ENTRY
insert into t1 values('sachin');
--error ER_DUP_ENTRY
insert into t1 values(123456789034567891);
select * from t1 order by a;
insert into t1 values(11),(22),(33);
--error ER_DUP_ENTRY
insert into t1 values(12),(22);
select * from t1 order by a;
insert into t1 values(repeat('s',4000*10)),(repeat('s',4001*10));
--error ER_DUP_ENTRY
insert into t1 values(repeat('m',4000*10)),(repeat('m',4000*10));
insert into t1 values(repeat('m',4001)),(repeat('m',4002));
truncate table t1;
insert into t1 values(1),(2),(3),(4),(5),(8),(7);
replace_result $datadir DATADIR;
exec $MYISAMCHK -d $datadir/test/t1;
--echo #now some alter commands;
alter table t1 add column b int;
desc t1;
show create table t1;
--error ER_DUP_ENTRY
insert into t1 values(1,2);
--error ER_DUP_ENTRY
insert into t1 values(2,2);
--error ER_BAD_FIELD_ERROR
select db_row_hash_1 from t1;
--echo #now try to change db_row_hash_1 column;
--error ER_CANT_DROP_FIELD_OR_KEY
alter table t1 drop column db_row_hash_1;
--error ER_CANT_DROP_FIELD_OR_KEY
alter table t1 add column d int , add column e int , drop column db_row_hash_1;
--error ER_BAD_FIELD_ERROR
alter table t1 modify column db_row_hash_1 int ;
--error ER_BAD_FIELD_ERROR
alter table t1 add column a int , add column b int, modify column db_row_hash_1 int ;
--error ER_BAD_FIELD_ERROR
alter table t1 change column db_row_hash_1 dsds int;
--error ER_BAD_FIELD_ERROR
alter table t1 add column asd int, change column db_row_hash_1 dsds int;
alter table t1 drop column b , add column c int;
show create table t1;

--echo #now add some column with name db_row_hash;
alter table t1 add column db_row_hash_1 int unique;
show create table t1;
--error ER_DUP_ENTRY
insert into t1 values(45,1,55),(46,1,55);
--error ER_DUP_ENTRY
insert into t1 values(45,1,55),(45,1,55);
alter table t1 add column db_row_hash_2 int, add column db_row_hash_3 int;
desc t1;
--echo #this should also drop the unique index ;
alter table t1 drop column a;
show create table t1;
show keys from t1;
--echo #add column with unique index on blob ;
alter table t1 add column a blob unique;
show create table t1;
--echo # try to change the blob unique name;
alter table t1 change column a aa blob ;
show create table t1;
show keys from t1;
--echo # try to change the blob unique datatype;
--echo #this will change index to b tree;
alter table t1 modify column aa int ;
show create table t1;
show keys from t1;
alter table t1 add column clm blob unique;
--echo #try changing the name ;
alter table t1 change column clm clm_changed blob;
show create table t1;
show keys from t1;
--echo #now drop the unique key;
alter table t1 drop key clm;
show create table t1;
show keys from t1;
drop table t1;

create table t1 (a TEXT CHARSET latin1 COLLATE latin1_german2_ci unique);
desc t1;
show keys from t1;
insert into t1 values ('ae');
--error ER_DUP_ENTRY
insert into t1 values ('AE');
insert into t1 values ('Ä');
drop table t1;
create table t1 (a int primary key, b blob unique);
desc t1;
show keys from t1;
insert into t1 values(1,1),(2,2),(3,3);
--error ER_DUP_ENTRY
insert into t1 values(1,1);
--error ER_DUP_ENTRY
insert into t1 values(7,1);
drop table t1;

--echo #table with multiple long blob column and varchar text column ;
create table t1(a blob unique, b int , c blob unique , d text unique , e varchar(3000) unique);
insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555),
('sachin',341,'fdf','gfgfgfg','hghgr'),('maria',345,'frter','dasd','utyuty'),
(123456789034567891,353534,53453453453456,64565464564564,45435345345345),
(123456789034567890,43545,657567567567,78967657567567,657567567567567676);

--echo #table structure;
desc t1;
show create table t1;
show keys from t1;
replace_result $datadir DATADIR;
exec $MYISAMCHK -d $datadir/test/t1;
select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
--echo #table select we should not be able to see db_row_hash_1 column;
select * from t1 order by a;
--error ER_BAD_FIELD_ERROR
select db_row_hash_1 from t1;
--error ER_BAD_FIELD_ERROR
select db_row_hash_2 from t1;
--error ER_BAD_FIELD_ERROR
select db_row_hash_3 from t1;
--echo #duplicate entry test;
--error ER_DUP_ENTRY
insert into t1 values(21,2,3,42,51);
--error ER_DUP_ENTRY
insert into t1 values('sachin',null,null,null,null);
--error ER_DUP_ENTRY
insert into t1 values(1234567890345671890,4353451,6575675675617,789676575675617,657567567567567676);
select * from t1 order by a;
insert into t1 values(repeat('s',4000*10),100,repeat('s',4000*10),repeat('s',4000*10),
repeat('s',400)),(repeat('s',4001*10),1000,repeat('s',4001*10),repeat('s',4001*10),
repeat('s',2995));
--error ER_DUP_ENTRY
insert into t1 values(repeat('m',4000*11),10,repeat('s',4000*11),repeat('s',4000*11),repeat('s',2995));
truncate table t1;
insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555);
--echo #now some alter commands;
alter table t1 add column f int;
desc t1;
show create table t1;
--echo #unique key should not break;
--error ER_DUP_ENTRY
insert into t1 values(1,2,3,4,5,6);
--error ER_BAD_FIELD_ERROR
select db_row_hash_1 , db_row_hash_2, db_row_hash_3 from t1;
--echo #now try to change db_row_hash_1 column;
--error ER_CANT_DROP_FIELD_OR_KEY
alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
--error ER_CANT_DROP_FIELD_OR_KEY
alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
--error ER_BAD_FIELD_ERROR
alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
--error ER_BAD_FIELD_ERROR
alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
--error ER_BAD_FIELD_ERROR
alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
--error ER_BAD_FIELD_ERROR
alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
alter table t1 drop column b , add column g int;
show create table t1;

--echo #now add some column with name db_row_hash;
alter table t1 add column db_row_hash_1 int unique;
alter table t1 add column db_row_hash_2 int unique;
alter table t1 add column db_row_hash_3 int unique;
show create table t1;

alter table t1 add column db_row_hash_7 int, add column db_row_hash_5 int , add column db_row_hash_4 int ;
alter table t1 drop column db_row_hash_7,drop column db_row_hash_3, drop column db_row_hash_4;
desc t1;
--echo #this should not break anything;
--error ER_DUP_ENTRY
insert into t1 values(1,2,3,4,5,6,23,5,6);
--echo #this should also drop the unique index;
alter table t1 drop column a, drop column c;
show create table t1;
show keys from t1;
--echo #add column with unique index on blob;
alter table t1 add column a blob unique;
show create table t1;
show keys from t1;
--echo #try to change the blob unique column name;
--echo #this will change index to b tree;
alter table t1 modify column a int , modify column e int;
show create table t1;
show keys from t1;
alter table t1 add column clm1 blob unique,add column clm2 blob unique;
--echo #try changing the name;
alter table t1 change column clm1 clm_changed1 blob, change column clm2 clm_changed2 blob;
show create table t1;
show keys from t1;
--echo #now drop the unique key;
alter table t1 drop key clm1, drop key clm2;
show create table t1;
show keys from t1;
drop table t1;
--echo #now the table with key on multiple columns; the ultimate test;
create table t1(a blob, b int , c varchar(2000) , d text , e varchar(3000) , f longblob , g int , h text ,
unique(a,b,c), unique(c,d,e),unique(e,f,g,h), unique(b,d,g,h));

insert into t1 values(1,1,1,1,1,1,1,1),(2,2,2,2,2,2,2,2),(3,3,3,3,3,3,3,3),(4,4,4,4,4,4,4,4),(5,5,5,5,5,5,5,5),
('maria',6,'maria','maria','maria','maria',6,'maria'),('mariadb',7,'mariadb','mariadb','mariadb','mariadb',8,'mariadb')
,(null,null,null,null,null,null,null,null),(null,null,null,null,null,null,null,null);

--echo #table structure;
desc t1;
show create table t1;
show keys from t1;
replace_result $datadir DATADIR;
exec $MYISAMCHK -d $datadir/test/t1;
select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
--echo # table select we should not be able to see db_row_hash_1 column;
select * from t1 order by a;
--error ER_BAD_FIELD_ERROR
select db_row_hash_1 from t1;
--error ER_BAD_FIELD_ERROR
select db_row_hash_2 from t1;
--error ER_BAD_FIELD_ERROR
select db_row_hash_3 from t1;
--echo #duplicate entry test;
--echo #duplicate keys entry;
--error ER_DUP_ENTRY
insert into t1 values(1,1,1,0,0,0,0,0);
--error ER_DUP_ENTRY
insert into t1 values(0,0,1,1,1,0,0,0);
--error ER_DUP_ENTRY
insert into t1 values(0,0,0,0,1,1,1,1);
--error ER_DUP_ENTRY
insert into t1 values(1,1,1,1,1,0,0,0);
--error ER_DUP_ENTRY
insert into t1 values(0,0,0,0,1,1,1,1);
--error ER_DUP_ENTRY
insert into t1 values(1,1,1,1,1,1,1,1);
--error ER_BAD_FIELD_ERROR
select db_row_hash_1,db_row_hash_2,db_row_hash_3,db_row_hash_4,db_row_hash_5 from t1;
--error ER_CANT_DROP_FIELD_OR_KEY
alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
--error ER_CANT_DROP_FIELD_OR_KEY
alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
--error ER_BAD_FIELD_ERROR
alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
--error ER_BAD_FIELD_ERROR
alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
--error ER_BAD_FIELD_ERROR
alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
--error ER_BAD_FIELD_ERROR
alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;

show create table t1;
--echo # add column named db_row_hash_*;
alter table t1 add column db_row_hash_7 int , add column db_row_hash_5 int,
add column db_row_hash_1 int, add column db_row_hash_2 int;
show create table t1;
show keys from t1;
alter table t1 drop column db_row_hash_7 , drop column db_row_hash_5 ,
drop column db_row_hash_1, drop column db_row_hash_2 ;
show create table t1;
show keys from t1;

--echo #try to change column names;
alter table t1 change column a aa blob , change column b bb blob , change column d dd blob;
show create table t1;
show keys from t1;
alter table t1 change column aa a blob , change column bb b blob , change column dd d blob;
show create table t1;
show keys from t1;

--echo #now we will change the data type to int and varchar limit so that we no longer require hash_index;
--echo #on key a_b_c;
alter table t1 modify column a varchar(20) , modify column b varchar(20) , modify column c varchar(20);
show create table t1;
show keys from t1;
--echo #change it back;
alter table t1 modify column a blob , modify column b blob , modify column c blob;
show create table t1;
show keys from t1;

--echo #try to delete blob column in unique;
truncate table t1;
## this feature removed in 10.2
#alter table t1 drop column a, drop column b, drop column c;
#show create table t1;
#show keys from t1;
--echo #now try to delete keys;
alter table t1 drop key c, drop key e;
show create table t1;
show keys from t1;
drop table t1;

--echo #now alter table containing some data basically some tests with ignore;
create table t1 (a blob);
insert into t1 values(1),(2),(3);
--echo #normal alter table;
alter table t1 add unique key(a);
alter table t1 drop key a;
truncate table t1;
insert into t1 values(1),(1),(2),(2),(3);
--error ER_DUP_ENTRY
alter table t1 add unique key(a);
alter ignore table t1 add unique key(a);
select * from t1 order by a;
--error ER_DUP_ENTRY
insert into t1 values(1);
show create table t1;
show keys from t1;
drop table t1;

--echo #Now with multiple keys;
create table t1(a blob , b blob, c blob , d blob , e int);
insert into t1 values (1,1,1,1,1);
insert into t1 values (1,1,1,1,1);
insert into t1 values (2,1,1,1,1);
insert into t1 values (2,2,2,2,2);
insert into t1 values (3,3,4,4,4);
insert into t1 values (4,4,4,4,4);
--error ER_DUP_ENTRY
alter table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
alter ignore table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
select * from t1 order by a;
--error ER_DUP_ENTRY
insert into t1 values (1,12,1,13,14);
--error ER_DUP_ENTRY
insert into t1 values (12,1,14,1,14);
--error ER_DUP_ENTRY
insert into t1 values (13,12,13,14,4);
show create table t1;
show keys from t1;
drop table t1;

--echo #visibility of db_row_hash
create table t1 (a blob unique , b blob unique);
desc t1;
insert into t1 values(1,19);
insert into t1 values(2,29);
insert into t1 values(3,39);
insert into t1 values(4,49);
create table t2 (DB_ROW_HASH_1 int, DB_ROW_HASH_2 int);
insert into t2 values(11,1);
insert into t2 values(22,2);
insert into t2 values(33,3);
insert into t2 values(44,4);
select * from t1 order by a;
select * from t2 order by DB_ROW_HASH_1;
--error ER_BAD_FIELD_ERROR
select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1;
--echo #bug
select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2;
--error ER_BAD_FIELD_ERROR
select * from t1 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
select * from t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t1);
--error ER_BAD_FIELD_ERROR
select DB_ROW_HASH_1 from t1,t2 where t1.DB_ROW_HASH_1 = t2.DB_ROW_HASH_2;
select DB_ROW_HASH_1 from t1 inner join t2 on t1.a = t2.DB_ROW_HASH_2;
drop table t1,t2;

--echo #very long blob entry;
SET @@GLOBAL.max_allowed_packet=67108864;

connect ('newcon', localhost, root,,);
--connection newcon
show variables like 'max_allowed_packet';
create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
desc t1;
show create table t1;
show keys from t1;
insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'1'));
--error ER_DUP_ENTRY
insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
concat(repeat('sachin',10000000),'4'));
--error ER_DUP_ENTRY
insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
concat(repeat('sachin',10000000),'1'));
drop table t1;

--echo #long key unique with different key length
create table t1(a blob, unique(a(3000)));
desc t1;
show keys from t1;
show create table t1;
insert into t1 value(concat(repeat('s',3000),'1'));
--error ER_DUP_ENTRY
insert into t1 value(concat(repeat('s',3000),'2'));
insert into t1 value(concat(repeat('a',3000),'2'));
drop table t1;

create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,
unique(a(3500), b), unique(c(4500), d));
desc t1;
show create table t1;
show keys from t1;
drop table t1;
disconnect newcon;
--connection default
SET @@GLOBAL.max_allowed_packet=4194304;
--echo #ext bug
create table t1(a int primary key, b blob unique, c int, d blob , index(c));
show create table t1;
insert into t1 values(1,23,1,33);
--error ER_DUP_ENTRY
insert into t1 values(2,23,1,33);
drop table t1;
create table t2 (a blob unique , c int , index(c));
show create table t2;
insert into t2 values(1,1);
insert into t2 values(2,1);
drop table t2;
--echo #not null test
create table t1(a blob unique not null);
desc t1;
show create table t1;
insert into t1 values(1);
insert into t1 values(3);
--error ER_DUP_ENTRY
insert into t1 values(1);
drop table t1;
create table t1(a int primary key, b blob unique , c blob unique not null);
insert into t1 values(1,1,1);
--error ER_DUP_ENTRY
insert into t1 values(2,1,2);
--error ER_DUP_ENTRY
insert into t1 values(3,3,1);
drop table t1;
create table t1 (a blob unique not null, b blob not null, c blob not null, unique(b,c));
desc t1;
show create table t1;
insert into t1 values (1, 2, 3);
insert into t1 values (2, 1, 3);
--error ER_DUP_ENTRY
insert into t1 values (2, 1, 3);
drop table t1;

--echo #partition
--error ER_BLOB_FIELD_IN_PART_FUNC_ERROR
create table t1(a blob unique) partition by hash(a);
--echo #key length > 2^16 -1
--error ER_TOO_LONG_HASH_KEYSEG
create table t1(a blob, unique(a(65536)));
create table t1(a blob, unique(a(65535)));
show create table t1;
drop table t1;

--echo #64 indexes
--let $create_table=create table t1 (
--let $insert_data_1=insert into t1 values(
--let $insert_data_2=insert into t1 values(
--let $count= 63
--let $index= 0
while ($count)
{
--let $create_table=$create_table a$count blob unique,
--let $insert_data_1=$insert_data_1 $count,
--let $insert_data_2=$insert_data_2 $index,
--dec $count
--inc $index
}
--let $create_table=$create_table a blob unique);
--let $insert_data_1=$insert_data_1 0);
--let $insert_data_2=$insert_data_2 63);

--eval $create_table
--eval $insert_data_1
--error ER_DUP_ENTRY
--eval $insert_data_1
--eval $insert_data_2
--error ER_DUP_ENTRY
--eval $insert_data_2
drop table t1;

set @@GLOBAL.max_allowed_packet= @allowed_packet;
mysql-test/main/long_unique_debug.result (new file, 579 lines)
@@ -0,0 +1,579 @@
#In this test case we will check what will happen in the case of hash collision
SET debug_dbug="d,same_long_unique_hash";
create table t1(a blob unique);
FLUSH STATUS;
insert into t1 values('xyz');
insert into t1 values('abc');
insert into t1 values('sachin');
insert into t1 values('sachin');
ERROR 23000: Duplicate entry 'sachin' for key 'a'
insert into t1 values('maria');
insert into t1 values('maria');
ERROR 23000: Duplicate entry 'maria' for key 'a'
drop table t1;
SHOW STATUS LIKE 'handler_read_next';
Variable_name	Value
Handler_read_next	11
SET debug_dbug="";
create table t1(a blob unique);
FLUSH STATUS;
insert into t1 values('xyz');
insert into t1 values('abc');
insert into t1 values('sachin');
insert into t1 values('sachin');
ERROR 23000: Duplicate entry 'sachin' for key 'a'
insert into t1 values('maria');
insert into t1 values('maria');
ERROR 23000: Duplicate entry 'maria' for key 'a'
drop table t1;
SHOW STATUS LIKE 'handler_read_next';
Variable_name	Value
Handler_read_next	0
SET debug_dbug="d,same_long_unique_hash";
create table t1(a blob unique, b blob unique);
insert into t1 values('xyz', 11);
insert into t1 values('abc', 22);
insert into t1 values('sachin', 1);
insert into t1 values('sachin', 4);
ERROR 23000: Duplicate entry 'sachin' for key 'a'
insert into t1 values('maria', 2);
insert into t1 values('maria', 3);
ERROR 23000: Duplicate entry 'maria' for key 'a'
drop table t1;
create table t1(a blob , b blob , unique(a,b));
insert into t1 values('xyz', 11);
insert into t1 values('abc', 22);
insert into t1 values('sachin', 1);
insert into t1 values('sachin', 1);
ERROR 23000: Duplicate entry 'sachin-1' for key 'a'
insert into t1 values('maria', 2);
insert into t1 values('maria', 2);
ERROR 23000: Duplicate entry 'maria-2' for key 'a'
drop table t1;
##Internal State of long unique tables
SET debug_dbug="d,print_long_unique_internal_state";
create table t1 ( a blob unique);
Warnings:
Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...

Printing Table keyinfo

table->s->reclength 19
table->s->fields 2

table->key_info[0] user_defined_key_parts = 1
table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[0] flags & HA_NOSAME = 0

table->s->key_info[0] user_defined_key_parts = 1
table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[0] flags & HA_NOSAME = 1

Printing table->key_info[0].key_part[0] info
key_part->offset = 11
key_part->field_name = DB_ROW_HASH_1
key_part->length = 8
key_part->null_bit = 2
key_part->null_offset = 0

Printing share->key_info[0].key_part[0] info
key_part->offset = 1
key_part->field_name = a
key_part->length = 0
key_part->null_bit = 1
key_part->null_offset = 0

Printing table->fields

table->field[0]->field_name a
table->field[0]->offset = 1
table->field[0]->field_length = 10
table->field[0]->null_pos wrt to record 0 = 0
table->field[0]->null_bit_pos = 1

table->field[1]->field_name DB_ROW_HASH_1
table->field[1]->offset = 11
table->field[1]->field_length = 8
table->field[1]->null_pos wrt to record 0 = 0
table->field[1]->null_bit_pos = 2

SET debug_dbug="";
drop table t1;
SET debug_dbug="d,print_long_unique_internal_state";
create table t1 ( a blob unique, b blob unique , c blob unique);
Warnings:
Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...

Printing Table keyinfo

table->s->reclength 55
table->s->fields 6

table->key_info[0] user_defined_key_parts = 1
table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[0] flags & HA_NOSAME = 0

table->s->key_info[0] user_defined_key_parts = 1
table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[0] flags & HA_NOSAME = 1

Printing table->key_info[0].key_part[0] info
key_part->offset = 31
key_part->field_name = DB_ROW_HASH_1
key_part->length = 8
key_part->null_bit = 8
key_part->null_offset = 0

Printing share->key_info[0].key_part[0] info
key_part->offset = 1
key_part->field_name = a
key_part->length = 0
key_part->null_bit = 1
key_part->null_offset = 0

table->key_info[1] user_defined_key_parts = 1
table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[1] flags & HA_NOSAME = 0

table->s->key_info[1] user_defined_key_parts = 1
table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[1] flags & HA_NOSAME = 1

Printing table->key_info[1].key_part[0] info
key_part->offset = 39
key_part->field_name = DB_ROW_HASH_2
key_part->length = 8
key_part->null_bit = 16
key_part->null_offset = 0

Printing share->key_info[1].key_part[0] info
key_part->offset = 11
key_part->field_name = b
key_part->length = 0
key_part->null_bit = 2
key_part->null_offset = 0

table->key_info[2] user_defined_key_parts = 1
table->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[2] flags & HA_NOSAME = 0

table->s->key_info[2] user_defined_key_parts = 1
table->s->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[2] flags & HA_NOSAME = 1

Printing table->key_info[2].key_part[0] info
key_part->offset = 47
key_part->field_name = DB_ROW_HASH_3
key_part->length = 8
key_part->null_bit = 32
key_part->null_offset = 0

Printing share->key_info[2].key_part[0] info
key_part->offset = 21
key_part->field_name = c
key_part->length = 0
key_part->null_bit = 4
key_part->null_offset = 0

Printing table->fields

table->field[0]->field_name a
table->field[0]->offset = 1
table->field[0]->field_length = 10
table->field[0]->null_pos wrt to record 0 = 0
table->field[0]->null_bit_pos = 1

table->field[1]->field_name b
table->field[1]->offset = 11
table->field[1]->field_length = 10
table->field[1]->null_pos wrt to record 0 = 0
table->field[1]->null_bit_pos = 2

table->field[2]->field_name c
table->field[2]->offset = 21
table->field[2]->field_length = 10
table->field[2]->null_pos wrt to record 0 = 0
table->field[2]->null_bit_pos = 4

table->field[3]->field_name DB_ROW_HASH_1
table->field[3]->offset = 31
table->field[3]->field_length = 8
table->field[3]->null_pos wrt to record 0 = 0
table->field[3]->null_bit_pos = 8

table->field[4]->field_name DB_ROW_HASH_2
table->field[4]->offset = 39
table->field[4]->field_length = 8
table->field[4]->null_pos wrt to record 0 = 0
table->field[4]->null_bit_pos = 16

table->field[5]->field_name DB_ROW_HASH_3
table->field[5]->offset = 47
table->field[5]->field_length = 8
table->field[5]->null_pos wrt to record 0 = 0
table->field[5]->null_bit_pos = 32

SET debug_dbug="";
drop table t1;
SET debug_dbug="d,print_long_unique_internal_state";
create table t1 ( a blob , b blob , c blob , d blob , unique (a,b), unique(c, d));
Warnings:
Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...

Printing Table keyinfo

table->s->reclength 57
table->s->fields 6

table->key_info[0] user_defined_key_parts = 1
table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[0] flags & HA_NOSAME = 0

table->s->key_info[0] user_defined_key_parts = 2
table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[0] flags & HA_NOSAME = 1

Printing table->key_info[0].key_part[0] info
key_part->offset = 41
key_part->field_name = DB_ROW_HASH_1
key_part->length = 8
key_part->null_bit = 16
key_part->null_offset = 0

Printing share->key_info[0].key_part[0] info
key_part->offset = 1
key_part->field_name = a
key_part->length = 0
key_part->null_bit = 1
key_part->null_offset = 0

Printing share->key_info[0].key_part[1] info
key_part->offset = 11
key_part->field_name = b
key_part->length = 0
key_part->null_bit = 2
key_part->null_offset = 0

table->key_info[1] user_defined_key_parts = 1
table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[1] flags & HA_NOSAME = 0

table->s->key_info[1] user_defined_key_parts = 2
table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[1] flags & HA_NOSAME = 1

Printing table->key_info[1].key_part[0] info
key_part->offset = 49
key_part->field_name = DB_ROW_HASH_2
key_part->length = 8
key_part->null_bit = 32
key_part->null_offset = 0

Printing share->key_info[1].key_part[0] info
key_part->offset = 21
key_part->field_name = c
key_part->length = 0
key_part->null_bit = 4
key_part->null_offset = 0

Printing share->key_info[1].key_part[1] info
key_part->offset = 31
key_part->field_name = d
key_part->length = 0
key_part->null_bit = 8
key_part->null_offset = 0

Printing table->fields

table->field[0]->field_name a
table->field[0]->offset = 1
table->field[0]->field_length = 10
table->field[0]->null_pos wrt to record 0 = 0
table->field[0]->null_bit_pos = 1

table->field[1]->field_name b
table->field[1]->offset = 11
table->field[1]->field_length = 10
table->field[1]->null_pos wrt to record 0 = 0
table->field[1]->null_bit_pos = 2

table->field[2]->field_name c
table->field[2]->offset = 21
table->field[2]->field_length = 10
table->field[2]->null_pos wrt to record 0 = 0
table->field[2]->null_bit_pos = 4

table->field[3]->field_name d
table->field[3]->offset = 31
table->field[3]->field_length = 10
table->field[3]->null_pos wrt to record 0 = 0
table->field[3]->null_bit_pos = 8

table->field[4]->field_name DB_ROW_HASH_1
table->field[4]->offset = 41
table->field[4]->field_length = 8
table->field[4]->null_pos wrt to record 0 = 0
table->field[4]->null_bit_pos = 16

table->field[5]->field_name DB_ROW_HASH_2
table->field[5]->offset = 49
table->field[5]->field_length = 8
table->field[5]->null_pos wrt to record 0 = 0
table->field[5]->null_bit_pos = 32

SET debug_dbug="";
drop table t1;
SET debug_dbug="d,print_long_unique_internal_state";
create table t1(a int primary key, b blob unique , c blob unique not null);
Warnings:
Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...

Printing Table keyinfo

table->s->reclength 41
table->s->fields 5

table->key_info[0] user_defined_key_parts = 1
table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
table->key_info[0] flags & HA_NOSAME = 1

table->s->key_info[0] user_defined_key_parts = 1
table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
table->s->key_info[0] flags & HA_NOSAME = 1

Printing table->key_info[0].key_part[0] info
key_part->offset = 1
key_part->field_name = a
key_part->length = 4
key_part->null_bit = 0
key_part->null_offset = 0

Printing share->key_info[0].key_part[0] info
key_part->offset = 1
key_part->field_name = a
key_part->length = 4
key_part->null_bit = 0
key_part->null_offset = 0

table->key_info[1] user_defined_key_parts = 1
table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[1] flags & HA_NOSAME = 0

table->s->key_info[1] user_defined_key_parts = 1
table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[1] flags & HA_NOSAME = 1

Printing table->key_info[1].key_part[0] info
key_part->offset = 25
key_part->field_name = DB_ROW_HASH_1
key_part->length = 8
key_part->null_bit = 2
key_part->null_offset = 0

Printing share->key_info[1].key_part[0] info
key_part->offset = 5
key_part->field_name = b
key_part->length = 0
key_part->null_bit = 1
key_part->null_offset = 0

table->key_info[2] user_defined_key_parts = 1
table->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[2] flags & HA_NOSAME = 0

table->s->key_info[2] user_defined_key_parts = 1
table->s->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[2] flags & HA_NOSAME = 1

Printing table->key_info[2].key_part[0] info
key_part->offset = 33
key_part->field_name = DB_ROW_HASH_2
key_part->length = 8
key_part->null_bit = 0
key_part->null_offset = 0

Printing share->key_info[2].key_part[0] info
key_part->offset = 15
key_part->field_name = c
key_part->length = 0
key_part->null_bit = 0
key_part->null_offset = 0

Printing table->fields

table->field[0]->field_name a
table->field[0]->offset = 1
table->field[0]->field_length = 4
table->field[0]->null_pos wrt to record 0 = -1
table->field[0]->null_bit_pos = 0

table->field[1]->field_name b
table->field[1]->offset = 5
table->field[1]->field_length = 10
table->field[1]->null_pos wrt to record 0 = 0
table->field[1]->null_bit_pos = 1

table->field[2]->field_name c
table->field[2]->offset = 15
table->field[2]->field_length = 10
table->field[2]->null_pos wrt to record 0 = -1
table->field[2]->null_bit_pos = 0

table->field[3]->field_name DB_ROW_HASH_1
table->field[3]->offset = 25
table->field[3]->field_length = 8
table->field[3]->null_pos wrt to record 0 = 0
table->field[3]->null_bit_pos = 2

table->field[4]->field_name DB_ROW_HASH_2
table->field[4]->offset = 33
table->field[4]->field_length = 8
table->field[4]->null_pos wrt to record 0 = -1
table->field[4]->null_bit_pos = 0

SET debug_dbug="";
drop table t1;
##Using hash
SET debug_dbug="d,print_long_unique_internal_state";
create table t1(a int ,b int , c int, unique(a, b, c) using hash);
Warnings:
Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...

Printing Table keyinfo

table->s->reclength 21
table->s->fields 4

table->key_info[0] user_defined_key_parts = 1
table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->key_info[0] flags & HA_NOSAME = 0

table->s->key_info[0] user_defined_key_parts = 3
table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
table->s->key_info[0] flags & HA_NOSAME = 1

Printing table->key_info[0].key_part[0] info
key_part->offset = 13
key_part->field_name = DB_ROW_HASH_1
key_part->length = 8
key_part->null_bit = 16
key_part->null_offset = 0

Printing share->key_info[0].key_part[0] info
key_part->offset = 1
key_part->field_name = a
key_part->length = 4
key_part->null_bit = 2
key_part->null_offset = 0

Printing share->key_info[0].key_part[1] info
key_part->offset = 5
key_part->field_name = b
key_part->length = 4
key_part->null_bit = 4
key_part->null_offset = 0

Printing share->key_info[0].key_part[2] info
key_part->offset = 9
key_part->field_name = c
key_part->length = 4
key_part->null_bit = 8
key_part->null_offset = 0

Printing table->fields

table->field[0]->field_name a
table->field[0]->offset = 1
table->field[0]->field_length = 4
table->field[0]->null_pos wrt to record 0 = 0
table->field[0]->null_bit_pos = 2

table->field[1]->field_name b
table->field[1]->offset = 5
table->field[1]->field_length = 4
table->field[1]->null_pos wrt to record 0 = 0
table->field[1]->null_bit_pos = 4

table->field[2]->field_name c
table->field[2]->offset = 9
table->field[2]->field_length = 4
table->field[2]->null_pos wrt to record 0 = 0
table->field[2]->null_bit_pos = 8

table->field[3]->field_name DB_ROW_HASH_1
table->field[3]->offset = 13
table->field[3]->field_length = 8
table->field[3]->null_pos wrt to record 0 = 0
table->field[3]->null_bit_pos = 16

SET debug_dbug="";
drop table t1;
##Using hash but with memory engine so no long unique column
SET debug_dbug="d,print_long_unique_internal_state";
create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
Warnings:
Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...

Printing Table keyinfo

table->s->reclength 13
table->s->fields 3

table->key_info[0] user_defined_key_parts = 3
table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
table->key_info[0] flags & HA_NOSAME = 1

table->s->key_info[0] user_defined_key_parts = 3
table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
table->s->key_info[0] flags & HA_NOSAME = 1

Printing table->key_info[0].key_part[0] info
key_part->offset = 1
key_part->field_name = a
key_part->length = 4
key_part->null_bit = 2
key_part->null_offset = 0

Printing share->key_info[0].key_part[0] info
key_part->offset = 1
key_part->field_name = a
key_part->length = 4
key_part->null_bit = 2
key_part->null_offset = 0

Printing share->key_info[0].key_part[1] info
key_part->offset = 5
key_part->field_name = b
key_part->length = 4
key_part->null_bit = 4
key_part->null_offset = 0

Printing share->key_info[0].key_part[2] info
key_part->offset = 9
key_part->field_name = c
key_part->length = 4
key_part->null_bit = 8
key_part->null_offset = 0

Printing table->fields

table->field[0]->field_name a
table->field[0]->offset = 1
table->field[0]->field_length = 4
table->field[0]->null_pos wrt to record 0 = 0
table->field[0]->null_bit_pos = 2

table->field[1]->field_name b
table->field[1]->offset = 5
table->field[1]->field_length = 4
table->field[1]->null_pos wrt to record 0 = 0
table->field[1]->null_bit_pos = 4

table->field[2]->field_name c
table->field[2]->offset = 9
table->field[2]->field_length = 4
table->field[2]->null_pos wrt to record 0 = 0
table->field[2]->null_bit_pos = 8

SET debug_dbug="";
drop table t1;
mysql-test/main/long_unique_debug.test (new file, 91 lines)
@@ -0,0 +1,91 @@
--source include/have_debug.inc
--source include/have_innodb.inc
--echo #In this test case we will check what will happen in the case of hash collision

SET debug_dbug="d,same_long_unique_hash";
create table t1(a blob unique);

FLUSH STATUS;
insert into t1 values('xyz');
insert into t1 values('abc');
insert into t1 values('sachin');
--error ER_DUP_ENTRY
insert into t1 values('sachin');
insert into t1 values('maria');
--error ER_DUP_ENTRY
insert into t1 values('maria');
drop table t1;
SHOW STATUS LIKE 'handler_read_next';

SET debug_dbug="";
create table t1(a blob unique);
FLUSH STATUS;

insert into t1 values('xyz');
insert into t1 values('abc');
insert into t1 values('sachin');
--error ER_DUP_ENTRY
insert into t1 values('sachin');
insert into t1 values('maria');
--error ER_DUP_ENTRY
insert into t1 values('maria');
drop table t1;
SHOW STATUS LIKE 'handler_read_next';

SET debug_dbug="d,same_long_unique_hash";
create table t1(a blob unique, b blob unique);

insert into t1 values('xyz', 11);
insert into t1 values('abc', 22);
insert into t1 values('sachin', 1);
--error ER_DUP_ENTRY
insert into t1 values('sachin', 4);
insert into t1 values('maria', 2);
--error ER_DUP_ENTRY
insert into t1 values('maria', 3);
drop table t1;

create table t1(a blob , b blob , unique(a,b));

insert into t1 values('xyz', 11);
insert into t1 values('abc', 22);
insert into t1 values('sachin', 1);
--error ER_DUP_ENTRY
insert into t1 values('sachin', 1);
insert into t1 values('maria', 2);
--error ER_DUP_ENTRY
insert into t1 values('maria', 2);
drop table t1;

--echo ##Internal State of long unique tables
SET debug_dbug="d,print_long_unique_internal_state";
create table t1 ( a blob unique);
SET debug_dbug="";
drop table t1;

SET debug_dbug="d,print_long_unique_internal_state";
create table t1 ( a blob unique, b blob unique , c blob unique);
SET debug_dbug="";
drop table t1;

SET debug_dbug="d,print_long_unique_internal_state";
create table t1 ( a blob , b blob , c blob , d blob , unique (a,b), unique(c, d));
SET debug_dbug="";
drop table t1;

SET debug_dbug="d,print_long_unique_internal_state";
create table t1(a int primary key, b blob unique , c blob unique not null);
SET debug_dbug="";
drop table t1;

--echo ##Using hash
SET debug_dbug="d,print_long_unique_internal_state";
create table t1(a int ,b int , c int, unique(a, b, c) using hash);
SET debug_dbug="";
drop table t1;

--echo ##Using hash but with memory engine so no long unique column
SET debug_dbug="d,print_long_unique_internal_state";
create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
SET debug_dbug="";
drop table t1;
mysql-test/main/long_unique_innodb.result (new file, 123 lines)
@@ -0,0 +1,123 @@
create table t1(a blob unique) engine= InnoDB;
insert into t1 values('RUC');
insert into t1 values ('RUC');
ERROR 23000: Duplicate entry 'RUC' for key 'a'
drop table t1;
#test for concurrent insert of long unique in innodb
create table t1(a blob unique) engine= InnoDB;
show create table t1;
Table	Create Table
t1	CREATE TABLE `t1` (
  `a` blob DEFAULT NULL,
  UNIQUE KEY `a` (`a`) USING HASH
) ENGINE=InnoDB DEFAULT CHARSET=latin1
connect 'con1', localhost, root,,;
connect 'con2', localhost, root,,;
connection con1;
set innodb_lock_wait_timeout= 2;
set transaction isolation level READ UNCOMMITTED;
start transaction;
insert into t1 values('RUC');
connection con2;
set innodb_lock_wait_timeout= 2;
set transaction isolation level READ UNCOMMITTED;
start transaction;
insert into t1 values ('RUC');
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
commit;
set transaction isolation level READ COMMITTED;
start transaction;
insert into t1 values('RC');
connection con2;
commit;
set transaction isolation level READ COMMITTED;
start transaction;
insert into t1 values ('RC');
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
commit;
connection con1;
commit;
set transaction isolation level REPEATABLE READ;
start transaction;
insert into t1 values('RR');
connection con2;
commit;
set transaction isolation level REPEATABLE READ;
start transaction;
insert into t1 values ('RR');
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con1;
commit;
set transaction isolation level SERIALIZABLE;
start transaction;
insert into t1 values('S');
connection con2;
commit;
set transaction isolation level SERIALIZABLE;
start transaction;
insert into t1 values ('S');
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
commit;
connection con1;
commit;
select * from t1;
a
RUC
RC
RR
S
drop table t1;
create table t1(a blob unique) engine=Innodb;
connection con1;
set transaction isolation level READ UNCOMMITTED;
start transaction;
insert into t1 values('RUC');
connection con2;
set transaction isolation level READ UNCOMMITTED;
start transaction;
insert into t1 values ('RUC');;
connection con1;
rollback;
connection con2;
commit;
connection con1;
set transaction isolation level READ COMMITTED;
start transaction;
insert into t1 values('RC');
connection con2;
set transaction isolation level READ COMMITTED;
start transaction;
insert into t1 values ('RC');;
connection con1;
rollback;
connection con2;
commit;
connection con1;
set transaction isolation level REPEATABLE READ;
start transaction;
insert into t1 values('RR');
connection con2;
set transaction isolation level REPEATABLE READ;
start transaction;
insert into t1 values ('RR');;
connection con1;
rollback;
connection con2;
commit;
connection con1;
set transaction isolation level SERIALIZABLE;
start transaction;
insert into t1 values('S');
connection con2;
set transaction isolation level SERIALIZABLE;
start transaction;
insert into t1 values ('S');;
connection con1;
rollback;
connection con2;
commit;
connection default;
drop table t1;
disconnect con1;
disconnect con2;
135
mysql-test/main/long_unique_innodb.test
Normal file
135
mysql-test/main/long_unique_innodb.test
Normal file
|
@ -0,0 +1,135 @@
|
|||
--source include/have_innodb.inc

create table t1(a blob unique) engine= InnoDB;
insert into t1 values('RUC');
--error ER_DUP_ENTRY
insert into t1 values ('RUC');
drop table t1;

--echo #test for concurrent insert of long unique in innodb
create table t1(a blob unique) engine= InnoDB;
show create table t1;
connect ('con1', localhost, root,,);
connect ('con2', localhost, root,,);

--connection con1
set innodb_lock_wait_timeout= 2;
set transaction isolation level READ UNCOMMITTED;
start transaction;
insert into t1 values('RUC');
--connection con2
set innodb_lock_wait_timeout= 2;
set transaction isolation level READ UNCOMMITTED;
start transaction;
--error ER_LOCK_WAIT_TIMEOUT
insert into t1 values ('RUC');

--connection con1
commit;
set transaction isolation level READ COMMITTED;
start transaction;
insert into t1 values('RC');
--connection con2
commit;
set transaction isolation level READ COMMITTED;
start transaction;
# either outcome is acceptable: the insert may see the duplicate or
# time out waiting for the other transaction's row lock
--error ER_DUP_ENTRY,ER_LOCK_WAIT_TIMEOUT
insert into t1 values ('RC');
commit;

--connection con1
commit;
set transaction isolation level REPEATABLE READ;
start transaction;
insert into t1 values('RR');
--connection con2
commit;
set transaction isolation level REPEATABLE READ;
start transaction;
--error ER_DUP_ENTRY,ER_LOCK_WAIT_TIMEOUT
insert into t1 values ('RR');

--connection con1
commit;
set transaction isolation level SERIALIZABLE;
start transaction;
insert into t1 values('S');
--connection con2
commit;
set transaction isolation level SERIALIZABLE;
start transaction;
--error ER_DUP_ENTRY,ER_LOCK_WAIT_TIMEOUT
insert into t1 values ('S');
commit;

--connection con1
commit;

select * from t1;
drop table t1;

create table t1(a blob unique) engine=Innodb;

--connection con1
set transaction isolation level READ UNCOMMITTED;
start transaction;
insert into t1 values('RUC');
--connection con2
set transaction isolation level READ UNCOMMITTED;
start transaction;
--send insert into t1 values ('RUC');
--connection con1
rollback;
--connection con2
--reap
commit;

--connection con1
set transaction isolation level READ COMMITTED;
start transaction;
insert into t1 values('RC');
--connection con2
set transaction isolation level READ COMMITTED;
start transaction;
--send insert into t1 values ('RC');
--connection con1
rollback;
--connection con2
--reap
commit;

--connection con1
set transaction isolation level REPEATABLE READ;
start transaction;
insert into t1 values('RR');
--connection con2
set transaction isolation level REPEATABLE READ;
start transaction;
--send insert into t1 values ('RR');
--connection con1
rollback;
--connection con2
--reap
commit;

--connection con1
set transaction isolation level SERIALIZABLE;
start transaction;
insert into t1 values('S');
--connection con2
set transaction isolation level SERIALIZABLE;
start transaction;
--send insert into t1 values ('S');
--connection con1
rollback;
--connection con2
--reap
commit;

connection default;
drop table t1;
disconnect con1;
disconnect con2;
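A condensed sketch of the locking behaviour the test above verifies: even at READ UNCOMMITTED the duplicate check does an index read on the other transaction's uncommitted row, so the second insert blocks on that row's lock instead of succeeding. Connection names and the timeout value below are illustrative, not from the patch:

  connect (c1, localhost, root,,);
  connect (c2, localhost, root,,);
  --connection c1
  create table t1 (a blob unique) engine=InnoDB;
  start transaction;
  insert into t1 values ('k');     # holds a lock on the hash index entry for 'k'
  --connection c2
  set innodb_lock_wait_timeout= 1;
  --error ER_LOCK_WAIT_TIMEOUT
  insert into t1 values ('k');     # blocks on c1's uncommitted row, then times out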
317
mysql-test/main/long_unique_update.result
Normal file
@@ -0,0 +1,317 @@
#structure of tests;
#1 test of table containing single unique blob column;
#2 test of table containing another unique int/ varchar etc column;
#3 test of table containing multiple unique blob column like unique(a),unique(b);
#4 test of table containing multiple multiple unique blob column like unique(a,b...),unique(c,d....);
#structure of each test;
#test if update works;
#test update for duplicate entry;
#test update for no change keys;
#test update for ignore ;
#test 1
create table t1 (a blob unique);
show keys from t1;
Table t1
Non_unique 0
Key_name a
Seq_in_index 1
Column_name a
Collation A
Cardinality NULL
Sub_part NULL
Packed NULL
Null YES
Index_type HASH
Comment 
Index_comment 
insert into t1 values(1),(2),(3),(4),(5);
select * from t1;
a
1
2
3
4
5
update t1 set a=11 where a=5;
update t1 set a=a+20 where a=1;
select * from t1;
a
21
2
3
4
11
update t1 set a=3 where a=2;
ERROR 23000: Duplicate entry '3' for key 'a'
update t1 set a=4 where a=3;
ERROR 23000: Duplicate entry '4' for key 'a'
#no change in blob key
update t1 set a=3 where a=3;
update t1 set a=2 where a=2;
select* from t1;
a
21
2
3
4
11
#IGNORE;
update ignore t1 set a=3 where a=2;
update ignore t1 set a=4 where a=3;
select * from t1;
a
21
2
3
4
11
drop table t1;
#test 2;
create table t1 (a int primary key, b blob unique , c int unique );
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
t1 0 b 1 b A NULL NULL NULL YES HASH
t1 0 c 1 c A NULL NULL NULL YES BTREE
insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
select * from t1 limit 3;
a b c
1 1 1
2 2 2
3 3 3
update t1 set b=34 where a=1;
update t1 set b=a+c+b+34 where b=2;
update t1 set b=a+10+b where c=3;
select * from t1;
a b c
1 34 1
2 40 2
3 16 3
4 4 4
5 5 5
6 6 6
7 7 7
truncate table t1;
insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
update t1 set b=4 where a=3;
ERROR 23000: Duplicate entry '4' for key 'b'
update t1 set b=a+1 where b=3;
ERROR 23000: Duplicate entry '4' for key 'b'
update t1 set b=a+1 where c=3;
ERROR 23000: Duplicate entry '4' for key 'b'
#no change in blob key
update t1 set b=3 where a=3;
update t1 set b=2 where b=2;
update t1 set b=5 where c=5;
select* from t1;
a b c
1 1 1
2 2 2
3 3 3
4 4 4
5 5 5
6 6 6
7 7 7
#IGNORE;
update ignore t1 set b=3 where a=2;
update ignore t1 set b=4 where b=3;
update ignore t1 set b=5 where c=3;
select * from t1;
a b c
1 1 1
2 2 2
3 3 3
4 4 4
5 5 5
6 6 6
7 7 7
drop table t1;
#test 3;
create table t1 (a blob unique, b blob unique , c blob unique);
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 a 1 a A NULL NULL NULL YES HASH
t1 0 b 1 b A NULL NULL NULL YES HASH
t1 0 c 1 c A NULL NULL NULL YES HASH
insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
select * from t1 limit 3;
a b c
1 1 1
2 2 2
3 3 3
update t1 set b=34 where a=1;
update t1 set b=a+c+b+34 where b=2;
update t1 set b=a+10+b where c=3;
select * from t1;
a b c
1 34 1
2 40 2
3 16 3
4 4 4
5 5 5
6 6 6
7 7 7
truncate table t1;
insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
update t1 set b=4 where a=3;
ERROR 23000: Duplicate entry '4' for key 'b'
update t1 set b=a+1 where b=3;
ERROR 23000: Duplicate entry '4' for key 'b'
update t1 set b=a+1 where c=3;
ERROR 23000: Duplicate entry '4' for key 'b'
#no change in blob key
update t1 set b=3 where a=3;
update t1 set b=2 where b=2;
update t1 set b=5 where c=5;
select* from t1;
a b c
1 1 1
2 2 2
3 3 3
4 4 4
5 5 5
6 6 6
7 7 7
#IGNORE;
update ignore t1 set b=3 where a=2;
update ignore t1 set b=4 where b=3;
update ignore t1 set b=5 where c=3;
update ignore t1 set b=b+3 where a>1 or b>1 or c>1;
select * from t1;
a b c
1 1 1
2 2 2
3 3 3
4 4 4
5 8 5
6 9 6
7 10 7
update ignore t1 set b=b+5 where a>1 and b<5 and c<a+b;
select * from t1;
a b c
1 1 1
2 7 2
3 3 3
4 4 4
5 8 5
6 9 6
7 10 7
drop table t1;
#test 4 ultimate test;
create table t1 (a int primary key , b int, c blob , d blob , e varchar(2000), f int , g text,
unique (b,c), unique (b,f),unique(e,g),unique(a,b,c,d,e,f,g));
desc t1;
Field Type Null Key Default Extra
a int(11) NO PRI NULL 
b int(11) YES MUL NULL 
c blob YES NULL 
d blob YES NULL 
e varchar(2000) YES MUL NULL 
f int(11) YES NULL 
g text YES NULL 
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
`b` int(11) DEFAULT NULL,
`c` blob DEFAULT NULL,
`d` blob DEFAULT NULL,
`e` varchar(2000) DEFAULT NULL,
`f` int(11) DEFAULT NULL,
`g` text DEFAULT NULL,
PRIMARY KEY (`a`),
UNIQUE KEY `b` (`b`,`c`) USING HASH,
UNIQUE KEY `b_2` (`b`,`f`),
UNIQUE KEY `e` (`e`,`g`) USING HASH,
UNIQUE KEY `a` (`a`,`b`,`c`,`d`,`e`,`f`,`g`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
t1 0 b 1 b A NULL NULL NULL YES HASH
t1 0 b 2 c A NULL NULL NULL YES HASH
t1 0 b_2 1 b A NULL NULL NULL YES BTREE
t1 0 b_2 2 f A NULL NULL NULL YES BTREE
t1 0 e 1 e A NULL NULL NULL YES HASH
t1 0 e 2 g A NULL NULL NULL YES HASH
t1 0 a 1 a A NULL NULL NULL HASH
t1 0 a 2 b A NULL NULL NULL YES HASH
t1 0 a 3 c A NULL NULL NULL YES HASH
t1 0 a 4 d A NULL NULL NULL YES HASH
t1 0 a 5 e A NULL NULL NULL YES HASH
t1 0 a 6 f A NULL NULL NULL YES HASH
t1 0 a 7 g A NULL NULL NULL YES HASH
insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
select * from t1 limit 3;
a b c d e f g
1 1 1 1 1 1 1
2 2 2 2 2 2 2
3 3 3 3 3 3 3
#key b_c
update t1 set b=2 ,c=2 where a=1;
ERROR 23000: Duplicate entry '2-2' for key 'b'
update t1 set b=b+34, c=c+34 where e=1 and g=1 ;
update t1 set b=35, c=35 where e=1 and g=1 ;
update t1 set b=b+1, c=c+1 where a>0;
ERROR 23000: Duplicate entry '3-3' for key 'b'
update ignore t1 set b=b+1, c=c+1 where a>0;
select * from t1 ;
a b c d e f g
1 37 37 1 1 1 1
2 2 2 2 2 2 2
3 3 3 3 3 3 3
4 4 4 4 4 4 4
5 5 5 5 5 5 5
6 6 6 6 6 6 6
7 7 7 7 7 7 7
8 8 8 8 8 8 8
9 10 10 9 9 9 9
truncate table t1;
insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
#key b_f no hash key
update t1 set b=2 , f=2 where a=1;
ERROR 23000: Duplicate entry '2-2' for key 'b_2'
update t1 set b=b+33, f=f+33 where e=1 and g=1;
update t1 set b=34, f=34 where e=1 and g=1 ;
update t1 set b=b+1, f=f+1 where a>0;
ERROR 23000: Duplicate entry '3-3' for key 'b_2'
update ignore t1 set b=b+1, f=f+1 where a>0;
select * from t1 ;
a b c d e f g
1 36 1 1 1 36 1
2 2 2 2 2 2 2
3 3 3 3 3 3 3
4 4 4 4 4 4 4
5 5 5 5 5 5 5
6 6 6 6 6 6 6
7 7 7 7 7 7 7
8 8 8 8 8 8 8
9 10 9 9 9 10 9
truncate table t1;
insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
#key e_g
update t1 set e=2 , g=2 where a=1;
ERROR 23000: Duplicate entry '2-2' for key 'e'
update t1 set e=e+34, g=g+34 where a=1;
update t1 set e=34, g=34 where e=1 and g=1 ;
select * from t1 where a=1;
a b c d e f g
1 1 1 1 35 1 35
update t1 set e=e+1, g=g+1 where a>0;
ERROR 23000: Duplicate entry '3-3' for key 'e'
update ignore t1 set e=e+1, g=g+1 where a>0;
select * from t1 ;
a b c d e f g
1 1 1 1 37 1 37
2 2 2 2 2 2 2
3 3 3 3 3 3 3
4 4 4 4 4 4 4
5 5 5 5 5 5 5
6 6 6 6 6 6 6
7 7 7 7 7 7 7
8 8 8 8 8 8 8
9 9 9 9 10 9 10
drop table t1;
138
mysql-test/main/long_unique_update.test
Normal file
@@ -0,0 +1,138 @@
--echo #structure of tests;
--echo #1 test of table containing single unique blob column;
--echo #2 test of table containing another unique int/ varchar etc column;
--echo #3 test of table containing multiple unique blob column like unique(a),unique(b);
--echo #4 test of table containing multiple multiple unique blob column like unique(a,b...),unique(c,d....);
--echo #structure of each test;
--echo #test if update works;
--echo #test update for duplicate entry;
--echo #test update for no change keys;
--echo #test update for ignore ;

--echo #test 1
create table t1 (a blob unique);
query_vertical show keys from t1;
insert into t1 values(1),(2),(3),(4),(5);
select * from t1;
update t1 set a=11 where a=5;
update t1 set a=a+20 where a=1;
select * from t1;
--error ER_DUP_ENTRY
update t1 set a=3 where a=2;
--error ER_DUP_ENTRY
update t1 set a=4 where a=3;
--echo #no change in blob key
update t1 set a=3 where a=3;
update t1 set a=2 where a=2;
select* from t1;
--echo #IGNORE;
update ignore t1 set a=3 where a=2;
update ignore t1 set a=4 where a=3;
select * from t1;
drop table t1;

--echo #test 2;
create table t1 (a int primary key, b blob unique , c int unique );
show keys from t1;
insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
select * from t1 limit 3;
update t1 set b=34 where a=1;
update t1 set b=a+c+b+34 where b=2;
update t1 set b=a+10+b where c=3;
select * from t1;
truncate table t1;
insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
--error ER_DUP_ENTRY
update t1 set b=4 where a=3;
--error ER_DUP_ENTRY
update t1 set b=a+1 where b=3;
--error ER_DUP_ENTRY
update t1 set b=a+1 where c=3;
--echo #no change in blob key
update t1 set b=3 where a=3;
update t1 set b=2 where b=2;
update t1 set b=5 where c=5;
select* from t1;
--echo #IGNORE;
update ignore t1 set b=3 where a=2;
update ignore t1 set b=4 where b=3;
update ignore t1 set b=5 where c=3;
select * from t1;
drop table t1;

--echo #test 3;
create table t1 (a blob unique, b blob unique , c blob unique);
show keys from t1;
insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
select * from t1 limit 3;
update t1 set b=34 where a=1;
update t1 set b=a+c+b+34 where b=2;
update t1 set b=a+10+b where c=3;
select * from t1;
truncate table t1;
insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
--error ER_DUP_ENTRY
update t1 set b=4 where a=3;
--error ER_DUP_ENTRY
update t1 set b=a+1 where b=3;
--error ER_DUP_ENTRY
update t1 set b=a+1 where c=3;
--echo #no change in blob key
update t1 set b=3 where a=3;
update t1 set b=2 where b=2;
update t1 set b=5 where c=5;
select* from t1;
--echo #IGNORE;
update ignore t1 set b=3 where a=2;
update ignore t1 set b=4 where b=3;
update ignore t1 set b=5 where c=3;
update ignore t1 set b=b+3 where a>1 or b>1 or c>1;
select * from t1;
update ignore t1 set b=b+5 where a>1 and b<5 and c<a+b;
select * from t1;
drop table t1;

--echo #test 4 ultimate test;
create table t1 (a int primary key , b int, c blob , d blob , e varchar(2000), f int , g text,
unique (b,c), unique (b,f),unique(e,g),unique(a,b,c,d,e,f,g));
desc t1;
show create table t1;
show keys from t1;
insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
select * from t1 limit 3;
--echo #key b_c
--error ER_DUP_ENTRY
update t1 set b=2 ,c=2 where a=1;
update t1 set b=b+34, c=c+34 where e=1 and g=1 ;
update t1 set b=35, c=35 where e=1 and g=1 ;
--error ER_DUP_ENTRY
update t1 set b=b+1, c=c+1 where a>0;
update ignore t1 set b=b+1, c=c+1 where a>0;
select * from t1 ;
truncate table t1;
insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
--echo #key b_f no hash key
--error ER_DUP_ENTRY
update t1 set b=2 , f=2 where a=1;
update t1 set b=b+33, f=f+33 where e=1 and g=1;
update t1 set b=34, f=34 where e=1 and g=1 ;
--error ER_DUP_ENTRY
update t1 set b=b+1, f=f+1 where a>0;
update ignore t1 set b=b+1, f=f+1 where a>0;
select * from t1 ;
truncate table t1;
insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
--echo #key e_g
--error ER_DUP_ENTRY
update t1 set e=2 , g=2 where a=1;
update t1 set e=e+34, g=g+34 where a=1;
update t1 set e=34, g=34 where e=1 and g=1 ;
select * from t1 where a=1;
--error ER_DUP_ENTRY
update t1 set e=e+1, g=g+1 where a>0;
update ignore t1 set e=e+1, g=g+1 where a>0;
select * from t1 ;
drop table t1;
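The "no change keys" cases above work because the update path compares the old and new values of the long unique columns before doing any hash lookup; an update that leaves them untouched never probes the hash index (see check_duplicate_long_entries_update in sql/handler.cc below). A small illustrative sketch, with a table of my own:

  create table t1 (a blob unique, b int);
  insert into t1 values ('k',1),('m',2);
  # b is not part of the long unique key, so no duplicate check runs:
  update t1 set b=b+10;
  drop table t1;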
54
mysql-test/main/long_unique_using_hash.result
Normal file
@@ -0,0 +1,54 @@
create table t1(a blob , unique(a) using hash);
show keys from t1;;
Table t1
Non_unique 0
Key_name a
Seq_in_index 1
Column_name a
Collation A
Cardinality NULL
Sub_part NULL
Packed NULL
Null YES
Index_type HASH
Comment 
Index_comment 
drop table t1;
create table t1(a blob , unique(a) using btree);
ERROR 42000: Specified key was too long; max key length is 1000 bytes
create table t1(a int , unique(a) using hash);
show keys from t1;;
Table t1
Non_unique 0
Key_name a
Seq_in_index 1
Column_name a
Collation A
Cardinality NULL
Sub_part NULL
Packed NULL
Null YES
Index_type HASH
Comment 
Index_comment 
drop table t1;
create table t1(a int ,b int , c int, unique(a, b, c) using hash);
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 a 1 a A NULL NULL NULL YES HASH
t1 0 a 2 b A NULL NULL NULL YES HASH
t1 0 a 3 c A NULL NULL NULL YES HASH
insert into t1 values(1,1,1);
insert into t1 values(1,1,1);
ERROR 23000: Duplicate entry '1-1-1' for key 'a'
drop table t1;
create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 a 1 a NULL NULL NULL NULL YES HASH
t1 0 a 2 b NULL NULL NULL NULL YES HASH
t1 0 a 3 c NULL 0 NULL NULL YES HASH
insert into t1 values(1,1,1);
insert into t1 values(1,1,1);
ERROR 23000: Duplicate entry '1-1-1' for key 'a'
drop table t1;
25
mysql-test/main/long_unique_using_hash.test
Normal file
@@ -0,0 +1,25 @@

create table t1(a blob , unique(a) using hash);
--query_vertical show keys from t1;
drop table t1;

--error ER_TOO_LONG_KEY
create table t1(a blob , unique(a) using btree);

create table t1(a int , unique(a) using hash);
--query_vertical show keys from t1;
drop table t1;

create table t1(a int ,b int , c int, unique(a, b, c) using hash);
show keys from t1;
insert into t1 values(1,1,1);
--error ER_DUP_ENTRY
insert into t1 values(1,1,1);
drop table t1;

create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
show keys from t1;
insert into t1 values(1,1,1);
--error ER_DUP_ENTRY
insert into t1 values(1,1,1);
drop table t1;
@@ -369,8 +369,6 @@ HELLO MY 1
 a 1
 hello 1
 drop table t1;
-create table t1 (a text, unique (a(2100)));
-ERROR 42000: Specified key was too long; max key length is 1000 bytes
 create table t1 (a text, key (a(2100)));
 Warnings:
 Note 1071 Specified key was too long; max key length is 1000 bytes
@@ -131,8 +131,6 @@ select c,count(*) from t1 group by c;
 select d,count(*) from t1 group by d;
 drop table t1;
 
--- error 1071
-create table t1 (a text, unique (a(2100))); # should give an error
 create table t1 (a text, key (a(2100))); # key is auto-truncated
 show create table t1;
 drop table t1;
@@ -66,7 +66,7 @@ a
 alter table t1 engine=myisam;
 explain select * from t1 where a in (869751,736494,226312,802616);
 id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index uniq_id uniq_id 4 NULL 5 Using where; Using index
+1 SIMPLE t1 index uniq_id uniq_id 8 NULL 5 Using where; Using index
 drop table t1;
 create table t1 (x int not null, y int not null, key x using HASH (x), unique y using HASH (y))
 engine=heap;
@@ -353,8 +353,6 @@ test.t1 check status OK
 drop table t1;
 CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255), KEY t1 (a, b, c, d, e));
 ERROR 42000: Specified key was too long; max key length is 1000 bytes
-CREATE TABLE t1 (a varchar(32000), unique key(a));
-ERROR 42000: Specified key was too long; max key length is 1000 bytes
 CREATE TABLE t1 (a varchar(1), b varchar(1), key (a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b));
 ERROR 42000: Too many key parts specified; max 32 parts allowed
 CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255));
@@ -374,8 +374,6 @@ drop table t1;
 
 --error 1071
 CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255), KEY t1 (a, b, c, d, e));
---error 1071
-CREATE TABLE t1 (a varchar(32000), unique key(a));
 --error 1070
 CREATE TABLE t1 (a varchar(1), b varchar(1), key (a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b));
 CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255));
@@ -9886,7 +9886,7 @@ int Field_bit::key_cmp(const uchar *str, uint length)
 }
 
 
-int Field_bit::cmp_offset(uint row_offset)
+int Field_bit::cmp_offset(my_ptrdiff_t row_offset)
 {
   if (bit_len)
   {
@@ -553,7 +553,7 @@ public:
     name.str= NULL;
     name.length= 0;
   };
-  ~Virtual_column_info() {}
+  ~Virtual_column_info() {};
   enum_vcol_info_type get_vcol_type() const
   {
     return vcol_type;
@@ -1083,7 +1083,7 @@ public:
   virtual int cmp(const uchar *,const uchar *)=0;
   virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U)
   { return memcmp(a,b,pack_length()); }
-  virtual int cmp_offset(uint row_offset)
+  virtual int cmp_offset(my_ptrdiff_t row_offset)
   { return cmp(ptr,ptr+row_offset); }
   virtual int cmp_binary_offset(uint row_offset)
   { return cmp_binary(ptr, ptr+row_offset); };
@@ -4311,7 +4311,7 @@ public:
   int key_cmp(const uchar *a, const uchar *b)
   { return cmp_binary((uchar *) a, (uchar *) b); }
   int key_cmp(const uchar *str, uint length);
-  int cmp_offset(uint row_offset);
+  int cmp_offset(my_ptrdiff_t row_offset);
   bool update_min(Field *min_val, bool force_update)
   {
     longlong val= val_int();
171
sql/handler.cc
@@ -3768,6 +3768,8 @@ void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag)
   }
   else
   {
+    if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+      setup_keyinfo_hash(key);
     /* Table is opened and defined at this point */
     key_unpack(&str,table, key);
     uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(msg);
@@ -3778,6 +3780,8 @@ void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag)
     }
     my_printf_error(ER_DUP_ENTRY, msg, errflag, str.c_ptr_safe(),
                     key->name.str);
+    if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+      re_setup_keyinfo_hash(key);
   }
 }
 
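The setup/re_setup swap above is what makes the duplicate-key message show the user's column values rather than the internal hash. A minimal sketch of the observable behaviour (the table and values are illustrative, not from the patch):

  create table t1 (a blob unique);
  insert into t1 values ('hello');
  # the message is built from the user-visible key parts, not from the hidden hash field:
  insert into t1 values ('hello');   -- ERROR 23000: Duplicate entry 'hello' for key 'a'
  drop table t1;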
@@ -3795,7 +3799,6 @@ void print_keydup_error(TABLE *table, KEY *key, myf errflag)
               errflag);
 }
 
-
 /**
   Print error that we got from handler function.
 
@@ -4288,6 +4291,8 @@ uint handler::get_dup_key(int error)
   DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
               m_lock_type != F_UNLCK);
   DBUG_ENTER("handler::get_dup_key");
+  if (table->s->long_unique_table && table->file->errkey < table->s->keys)
+    DBUG_RETURN(table->file->errkey);
   table->file->errkey = (uint) -1;
   if (error == HA_ERR_FOUND_DUPP_KEY ||
       error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
@@ -6483,6 +6488,162 @@ static int wsrep_after_row(THD *thd)
 }
 #endif /* WITH_WSREP */
 
+static int check_duplicate_long_entry_key(TABLE *table, handler *h, uchar *new_rec,
+                                          uint key_no)
+{
+  Field *hash_field;
+  int result, error= 0;
+  KEY *key_info= table->key_info + key_no;
+  hash_field= key_info->key_part->field;
+  DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
+               key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL)
+              || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
+  uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
+
+  if (hash_field->is_real_null())
+    return 0;
+
+  key_copy(ptr, new_rec, key_info, key_info->key_length, false);
+
+  if (!table->check_unique_buf)
+    table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
+                                                 table->s->reclength);
+
+  result= h->ha_index_init(key_no, 0);
+  if (result)
+    return result;
+  result= h->ha_index_read_map(table->check_unique_buf,
+                               ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
+  if (!result)
+  {
+    bool is_same;
+    Field *t_field;
+    Item_func_hash *temp= (Item_func_hash *)hash_field->vcol_info->expr;
+    Item **arguments= temp->arguments();
+    uint arg_count= temp->argument_count();
+    do
+    {
+      long diff= table->check_unique_buf - new_rec;
+      is_same= true;
+      for (uint j=0; is_same && j < arg_count; j++)
+      {
+        DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM ||
+                    // this one for left(fld_name,length)
+                    arguments[j]->type() == Item::FUNC_ITEM);
+        if (arguments[j]->type() == Item::FIELD_ITEM)
+        {
+          t_field= static_cast<Item_field *>(arguments[j])->field;
+          if (t_field->cmp_offset(diff))
+            is_same= false;
+        }
+        else
+        {
+          Item_func_left *fnc= static_cast<Item_func_left *>(arguments[j]);
+          DBUG_ASSERT(!my_strcasecmp(system_charset_info, "left", fnc->func_name()));
+          DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM);
+          t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
+          longlong length= fnc->arguments()[1]->val_int();
+          if (t_field->cmp_max(t_field->ptr, t_field->ptr + diff, length))
+            is_same= false;
+        }
+      }
+    }
+    while (!is_same &&
+           !(result= table->file->ha_index_next_same(table->check_unique_buf,
+                                                     ptr, key_info->key_length)));
+    if (is_same)
+    {
+      table->file->errkey= key_no;
+      error= HA_ERR_FOUND_DUPP_KEY;
+      goto exit;
+    }
+    else
+      goto exit;
+  }
+  if (result == HA_ERR_LOCK_WAIT_TIMEOUT)
+  {
+    table->file->errkey= key_no;
+    error= HA_ERR_LOCK_WAIT_TIMEOUT;
+  }
+exit:
+  h->ha_index_end();
+  return error;
+}
+
+/** @brief
+    Check whether an inserted record breaks the
+    unique constraint on long columns.
+    @returns 0 if no duplicate else returns error
+  */
+static int check_duplicate_long_entries(TABLE *table, handler *h, uchar *new_rec)
+{
+  table->file->errkey= -1;
+  int result;
+  for (uint i= 0; i < table->s->keys; i++)
+  {
+    if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
+        (result= check_duplicate_long_entry_key(table, h, new_rec, i)))
+      return result;
+  }
+  return 0;
+}
+
+/** @brief
+    Check whether an updated record breaks the
+    unique constraint on long columns.
+    In the case of update we only need to check the changed key.
+    The reason: consider
+      create table t1(a blob , b blob , x blob , y blob ,unique(a,b)
+                                                        ,unique(x,y))
+    and an update statement like
+      update t1 set a=23+a;
+    If we scanned every long unique key of the table, the index scan
+    on x_y would report a duplicate because that data did not change.
+    So in the case of update we take the key as a parameter; for a
+    normal insert the key should be -1.
+    @returns 0 if no duplicate else returns error
+  */
+static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
+{
+  Field *field;
+  uint key_parts;
+  int error= 0;
+  KEY *keyinfo;
+  KEY_PART_INFO *keypart;
+  /*
+    Here we are comparing whether new record and old record are same
+    with respect to fields in hash_str
+  */
+  long reclength= table->record[1]-table->record[0];
+  if (!table->update_handler)
+    table->clone_handler_for_update();
+  for (uint i= 0; i < table->s->keys; i++)
+  {
+    keyinfo= table->key_info + i;
+    if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+    {
+      key_parts= fields_in_hash_keyinfo(keyinfo);
+      keypart= keyinfo->key_part - key_parts;
+      for (uint j= 0; j < key_parts; j++, keypart++)
+      {
+        field= keypart->field;
+        /* Compare fields; if they are different then check for duplicates */
+        if (field->cmp_binary_offset(reclength))
+        {
+          if ((error= check_duplicate_long_entry_key(table, table->update_handler,
+                                                     new_rec, i)))
+            goto exit;
+          /*
+            break because check_duplicate_long_entries_key will
+            take care of remaining fields
+          */
+          break;
+        }
+      }
+    }
+  }
+exit:
+  return error;
+}
+
 int handler::ha_write_row(uchar *buf)
 {
   int error;
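The bucket walk above (ha_index_read_map followed by ha_index_next_same with a field-by-field compare) can be observed from SQL through the same_long_unique_hash debug point defined in Item_func_hash::val_int later in this patch. A sketch, assuming a debug build; the table and values are mine:

  create table t1 (a blob unique);
  set debug_dbug="+d,same_long_unique_hash";   # force every hash value to 9
  insert into t1 values ('one'),('two');       # same bucket, different values: both accepted
  --error ER_DUP_ENTRY
  insert into t1 values ('one');               # real duplicate found by the field compare
  set debug_dbug="";
  drop table t1;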
@@ -6496,6 +6657,9 @@ int handler::ha_write_row(uchar *buf)
   mark_trx_read_write();
   increment_statistics(&SSV::ha_write_count);
 
+  if (table->s->long_unique_table &&
+      (error= check_duplicate_long_entries(table, table->file, buf)))
+    DBUG_RETURN(error);
   TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_WRITE_ROW, MAX_KEY, 0,
                 { error= write_row(buf); })
 
@@ -6535,6 +6699,11 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
   MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
   mark_trx_read_write();
   increment_statistics(&SSV::ha_update_count);
+  if (table->s->long_unique_table &&
+      (error= check_duplicate_long_entries_update(table, table->file, (uchar *)new_data)))
+  {
+    return error;
+  }
 
   TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_UPDATE_ROW, active_index, 0,
                 { error= update_row(old_data, new_data);})
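A concrete SQL version of the update-path case described in the check_duplicate_long_entries_update comment above (table and values are illustrative; concat is used just to change one column):

  create table t1 (a blob, b blob, x blob, y blob, unique(a,b), unique(x,y));
  insert into t1 values ('1','1','1','1'), ('2','2','2','2');
  # only a changes, so only the a_b hash is recomputed and probed;
  # the unchanged x_y key is skipped even though its hash lookup
  # would find the row itself:
  update t1 set a=concat(a,'9');
  drop table t1;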
@@ -323,7 +323,10 @@ enum enum_alter_inplace_result {
 /** whether every data field explicitly stores length
 (holds for InnoDB ROW_FORMAT=REDUNDANT) */
 #define HA_EXTENDED_TYPES_CONVERSION (1ULL << 57)
-#define HA_LAST_TABLE_FLAG HA_EXTENDED_TYPES_CONVERSION
+
+/* Support native hash index */
+#define HA_CAN_HASH_KEYS (1ULL << 58)
+#define HA_LAST_TABLE_FLAG HA_CAN_HASH_KEYS
 
 /* bits in index_flags(index_number) for what you can do with index */
 #define HA_READ_NEXT 1 /* TODO really use this flag */
@@ -3128,7 +3131,7 @@ public:
     check_table_binlog_row_based_done(0),
     check_table_binlog_row_based_result(0),
     row_already_logged(0),
-    in_range_check_pushed_down(FALSE),
+    in_range_check_pushed_down(FALSE), errkey(-1),
     key_used_on_scan(MAX_KEY),
     active_index(MAX_KEY), keyread(MAX_KEY),
     ref_length(sizeof(my_off_t)),
@@ -1714,6 +1714,46 @@ bool Item_func_mod::fix_length_and_dec()
   DBUG_RETURN(FALSE);
 }
 
+static void calc_hash_for_unique(ulong &nr1, ulong &nr2, String *str)
+{
+  CHARSET_INFO *cs;
+  uchar l[4];
+  int4store(l, str->length());
+  cs= str->charset();
+  cs->coll->hash_sort(cs, l, sizeof(l), &nr1, &nr2);
+  cs= str->charset();
+  cs->coll->hash_sort(cs, (uchar *)str->ptr(), str->length(), &nr1, &nr2);
+}
+
+longlong Item_func_hash::val_int()
+{
+  DBUG_EXECUTE_IF("same_long_unique_hash", return 9;);
+  unsigned_flag= true;
+  ulong nr1= 1, nr2= 4;
+  String *str;
+  for (uint i= 0; i < arg_count; i++)
+  {
+    str= args[i]->val_str();
+    if (args[i]->null_value)
+    {
+      null_value= 1;
+      return 0;
+    }
+    calc_hash_for_unique(nr1, nr2, str);
+  }
+  null_value= 0;
+  return (longlong)nr1;
+}
+
+
+bool Item_func_hash::fix_length_and_dec()
+{
+  decimals= 0;
+  max_length= 8;
+  return false;
+}
+
+
+
 double Item_func_neg::real_op()
 {
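Item_func_hash::val_int above feeds each argument's 4-byte length into the hash before the argument's bytes, so multi-column values cannot alias each other by shifting bytes between columns. A small illustrative sketch (the table and values are mine, not from the patch):

  create table t1 (a varchar(2000), b varchar(2000), unique(a,b) using hash);
  insert into t1 values ('ab','c');
  # the concatenated bytes are the same, but the per-column lengths differ,
  # hence a different hash and no duplicate:
  insert into t1 values ('a','bc');
  drop table t1;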
@@ -1031,6 +1031,19 @@ public:
 };
 
 
+class Item_func_hash: public Item_int_func
+{
+public:
+  Item_func_hash(THD *thd, List<Item> &item): Item_int_func(thd, item)
+  {}
+  longlong val_int();
+  bool fix_length_and_dec();
+  const Type_handler *type_handler() const { return &type_handler_long; }
+  Item *get_copy(THD *thd)
+  { return get_item_copy<Item_func_hash>(thd, this); }
+  const char *func_name() const { return "<hash>"; }
+};
+
 class Item_longlong_func: public Item_int_func
 {
 public:
@@ -441,7 +441,8 @@ void key_unpack(String *to, TABLE *table, KEY *key)
       }
     }
     field_unpack(to, key_part->field, table->record[0], key_part->length,
-                 MY_TEST(key_part->key_part_flag & HA_PART_KEY_SEG));
+                 MY_TEST(key_part->key_part_flag & HA_PART_KEY_SEG ||
+                   (key->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length)));
   }
   dbug_tmp_restore_column_map(table->read_set, old_map);
   DBUG_VOID_RETURN;
@@ -7953,3 +7953,5 @@ ER_PERIOD_COLUMNS_UPDATED
 
 ER_PERIOD_CONSTRAINT_DROP
         eng "Can't DROP CONSTRAINT `%s`. Use DROP PERIOD `%s` for this"
+ER_TOO_LONG_HASH_KEYSEG
+        eng "Max key segment length is 65535"
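The new ER_TOO_LONG_HASH_KEYSEG error corresponds to the 2^16-1 limit on a stored key segment that mysql_prepare_create_table checks further down in this patch. A hypothetical trigger (70000 is just any prefix length above 65535):

  --error ER_TOO_LONG_HASH_KEYSEG
  create table t1 (a blob, unique(a(70000)));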
@@ -17904,6 +17904,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
   table->intersect_keys.init();
   table->keys_in_use_for_query.init();
   table->no_rows_with_nulls= param->force_not_null_cols;
+  table->update_handler= NULL;
+  table->check_unique_buf= NULL;
 
   table->s= share;
   init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
@@ -2304,7 +2304,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
                                           hton->field_options);
   }
 
-  key_info= table->key_info;
+  key_info= table->s->key_info;
   primary_key= share->primary_key;
 
   for (uint i=0 ; i < share->keys ; i++,key_info++)
@@ -2352,14 +2352,16 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
       if (key_part->field &&
           (key_part->length !=
            table->field[key_part->fieldnr-1]->key_length() &&
-           !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))))
+           !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))) &&
+          (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
+           key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
       {
         packet->append_parenthesized((long) key_part->length /
                                       key_part->field->charset()->mbmaxlen);
       }
     }
     packet->append(')');
-    store_key_options(thd, packet, table, key_info);
+    store_key_options(thd, packet, table, &table->key_info[i]);
     if (key_info->parser)
     {
       LEX_CSTRING *parser_name= plugin_name(key_info->parser);
@@ -2494,7 +2496,8 @@ static void store_key_options(THD *thd, String *packet, TABLE *table,
     if (key_info->algorithm == HA_KEY_ALG_BTREE)
       packet->append(STRING_WITH_LEN(" USING BTREE"));
 
-    if (key_info->algorithm == HA_KEY_ALG_HASH)
+    if (key_info->algorithm == HA_KEY_ALG_HASH ||
+        key_info->algorithm == HA_KEY_ALG_LONG_HASH)
       packet->append(STRING_WITH_LEN(" USING HASH"));
 
     /* send USING only in non-default case: non-spatial rtree */
@@ -6623,20 +6626,27 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
             table->field[8]->set_notnull();
           }
           KEY *key=show_table->key_info+i;
-          if (key->rec_per_key[j])
+          if (key->rec_per_key[j] && key->algorithm != HA_KEY_ALG_LONG_HASH)
           {
             ha_rows records= (ha_rows) ((double) show_table->stat_records() /
                                         key->actual_rec_per_key(j));
             table->field[9]->store((longlong) records, TRUE);
             table->field[9]->set_notnull();
           }
-          const char *tmp= show_table->file->index_type(i);
-          table->field[13]->store(tmp, strlen(tmp), cs);
+          if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+            table->field[13]->store(STRING_WITH_LEN("HASH"), cs);
+          else
+          {
+            const char *tmp= show_table->file->index_type(i);
+            table->field[13]->store(tmp, strlen(tmp), cs);
+          }
         }
         if (!(key_info->flags & HA_FULLTEXT) &&
             (key_part->field &&
              key_part->length !=
-             show_table->s->field[key_part->fieldnr-1]->key_length()))
+             show_table->s->field[key_part->fieldnr-1]->key_length()) &&
+            (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
+             key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
         {
           table->field[10]->store((longlong) key_part->length /
                                   key_part->field->charset()->mbmaxlen, TRUE);
@@ -6882,7 +6892,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
 {
   List<FOREIGN_KEY_INFO> f_key_list;
   TABLE *show_table= tables->table;
-  KEY *key_info=show_table->key_info;
+  KEY *key_info=show_table->s->key_info;
   uint primary_key= show_table->s->primary_key;
   show_table->file->info(HA_STATUS_VARIABLE |
                          HA_STATUS_NO_LOCK |
@@ -7080,7 +7090,7 @@ static int get_schema_key_column_usage_record(THD *thd,
 {
   List<FOREIGN_KEY_INFO> f_key_list;
   TABLE *show_table= tables->table;
-  KEY *key_info=show_table->key_info;
+  KEY *key_info=show_table->s->key_info;
   uint primary_key= show_table->s->primary_key;
   show_table->file->info(HA_STATUS_VARIABLE |
                          HA_STATUS_NO_LOCK |
166
sql/sql_table.cc
@@ -2778,6 +2778,8 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db,
 
   This will make checking for duplicated keys faster and ensure that
   PRIMARY keys are prioritized.
+  This will not reorder LONG_HASH indexes, because they must match the
+  order of their LONG_UNIQUE_HASH_FIELD's.
 */
 
 static int sort_keys(KEY *a, KEY *b)
@@ -2790,6 +2792,9 @@ static int sort_keys(KEY *a, KEY *b)
       return -1;
     if ((a_flags ^ b_flags) & HA_NULL_PART_KEY)
     {
+      if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
+          b->algorithm == HA_KEY_ALG_LONG_HASH)
+        return a->usable_key_parts - b->usable_key_parts;
       /* Sort NOT NULL keys before other keys */
       return (a_flags & HA_NULL_PART_KEY) ? 1 : -1;
     }
@@ -3026,7 +3031,6 @@ void promote_first_timestamp_column(List<Create_field> *column_definitions)
   }
 }
 
-
 /**
   Check if there is a duplicate key. Report a warning for every duplicate key.
 
@@ -3292,6 +3296,65 @@ int mysql_add_invisible_field(THD *thd, List<Create_field> * field_list,
   return 0;
 }
 
+#define LONG_HASH_FIELD_NAME_LENGTH 30
+static inline void make_long_hash_field_name(LEX_CSTRING *buf, uint num)
+{
+  buf->length= my_snprintf((char *)buf->str,
+          LONG_HASH_FIELD_NAME_LENGTH, "DB_ROW_HASH_%u", num);
+}
+/**
+  Add fully invisible hash field to table in case of long
+  unique column
+  @param  thd           Thread Context.
+  @param  create_list   List of table fields.
+  @param  key_info      current long unique key info
+*/
+static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
+                                     KEY *key_info)
+{
+  List_iterator<Create_field> it(*create_list);
+  //CHARSET_INFO *field_cs;
+  Create_field *dup_field, *cf= new (thd->mem_root) Create_field();
+  cf->flags|= UNSIGNED_FLAG | LONG_UNIQUE_HASH_FIELD;
+  cf->decimals= 0;
+  cf->length= cf->char_length= cf->pack_length= HA_HASH_FIELD_LENGTH;
+  cf->invisible= INVISIBLE_FULL;
+  cf->pack_flag|= FIELDFLAG_MAYBE_NULL;
+  uint num= 1;
+  LEX_CSTRING field_name;
+  field_name.str= (char *)thd->alloc(LONG_HASH_FIELD_NAME_LENGTH);
+  make_long_hash_field_name(&field_name, num);
+  /*
+    Check for collisions
+  */
+  while ((dup_field= it++))
+  {
+    if (!my_strcasecmp(system_charset_info, field_name.str, dup_field->field_name.str))
+    {
+      num++;
+      make_long_hash_field_name(&field_name, num);
+      it.rewind();
+    }
+  }
+/*  for (uint i= 0; i < key_info->user_defined_key_parts; i++)
+  {
+    dup_field= create_list->elem(key_info->key_part[i].fieldnr);
+    if (!i)
+      field_cs= dup_field->charset;
+    else if(field_cs != dup_field->charset)
+    {
+      my_error(ER_MULTIPLE_CS_HASH_KEY, MYF(0));
+      return NULL;
+    }
+  }
+  cf->charset= field_cs;*/
+  cf->field_name= field_name;
+  cf->set_handler(&type_handler_longlong);
+  key_info->algorithm= HA_KEY_ALG_LONG_HASH;
+  create_list->push_back(cf,thd->mem_root);
+  return cf;
+}
+
 Key *
 mysql_add_invisible_index(THD *thd, List<Key> *key_list,
         LEX_CSTRING* field_name, enum Key::Keytype type)
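The collision loop in add_hash_field can be watched from SQL: if the user already has a column with the generated name, the hidden hash field simply moves to the next free number. Sketch (the DB_ROW_HASH_n names come from the patch; the table itself is mine):

  create table t1 (db_row_hash_1 int, a blob unique);
  # the invisible hash field is created as DB_ROW_HASH_2 and never
  # appears in SHOW CREATE TABLE:
  show create table t1;
  drop table t1;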
@@ -3349,6 +3412,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
   uint total_uneven_bit_length= 0;
   int select_field_count= C_CREATE_SELECT(create_table_mode);
   bool tmp_table= create_table_mode == C_ALTER_TABLE;
+  bool is_hash_field_needed= false;
   DBUG_ENTER("mysql_prepare_create_table");
 
   DBUG_EXECUTE_IF("test_pseudo_invisible",{
|
|||
uint key_length=0;
|
||||
Key_part_spec *column;
|
||||
|
||||
is_hash_field_needed= false;
|
||||
if (key->name.str == ignore_key)
|
||||
{
|
||||
/* ignore redundant keys */
|
||||
|
@ -3887,8 +3952,13 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
|
|||
column->length= MAX_LEN_GEOM_POINT_FIELD;
|
||||
if (!column->length)
|
||||
{
|
||||
my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
|
||||
DBUG_RETURN(TRUE);
|
||||
if (key->type == Key::PRIMARY)
|
||||
{
|
||||
my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
else
|
||||
is_hash_field_needed= true;
|
||||
}
|
||||
}
|
||||
#ifdef HAVE_SPATIAL
|
||||
|
@@ -3967,9 +4037,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
       if (key_part_length > max_key_length ||
           key_part_length > file->max_key_part_length())
       {
-        key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
         if (key->type == Key::MULTIPLE)
         {
+          key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
           /* not a critical problem */
           push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
                               ER_TOO_LONG_KEY,
@@ -3979,10 +4049,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
           key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
         }
         else
-        {
-          my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
-          DBUG_RETURN(TRUE);
-        }
+          is_hash_field_needed= true;
       }
     }
     // Catch invalid use of partial keys
|
@ -4006,7 +4073,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
|
|||
else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
|
||||
key_part_length= column->length;
|
||||
}
|
||||
else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG))
|
||||
else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG) &&
|
||||
!is_hash_field_needed)
|
||||
{
|
||||
my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(),
|
||||
column->field_name.str);
|
||||
|
@@ -4015,9 +4083,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
     if (key_part_length > file->max_key_part_length() &&
         key->type != Key::FULLTEXT)
     {
-      key_part_length= file->max_key_part_length();
       if (key->type == Key::MULTIPLE)
       {
+        key_part_length= file->max_key_part_length();
        /* not a critical problem */
        push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
                            ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
@@ -4027,18 +4095,38 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
       }
       else
       {
-        my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
-        DBUG_RETURN(TRUE);
+        if (key->type == Key::UNIQUE)
+        {
+          is_hash_field_needed= true;
+        }
+        else
+        {
+          key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
+          my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
+          DBUG_RETURN(TRUE);
+        }
       }
     }
-    key_part_info->length= (uint16) key_part_length;
+    /* We can not store key_part_length of more than 2^16 - 1 in frm,
+       so we will simply make it zero */
+    if (is_hash_field_needed && column->length > (1<<16) - 1)
+    {
+      my_error(ER_TOO_LONG_HASH_KEYSEG, MYF(0));
+      DBUG_RETURN(TRUE);
+    }
+    else
+      key_part_info->length= (uint16) key_part_length;
+    if (is_hash_field_needed &&
+        (key_part_info->length == sql_field->char_length * sql_field->charset->mbmaxlen ||
+         key_part_info->length == (1<<16) - 1))
+      key_part_info->length= 0;
     /* Use packed keys for long strings on the first column */
     if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
         !((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
         (key_part_length >= KEY_DEFAULT_PACK_LENGTH &&
          (sql_field->real_field_type() == MYSQL_TYPE_STRING ||
           sql_field->real_field_type() == MYSQL_TYPE_VARCHAR ||
-          sql_field->pack_flag & FIELDFLAG_BLOB)))
+          sql_field->pack_flag & FIELDFLAG_BLOB)) && !is_hash_field_needed)
     {
       if ((column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB)) ||
           sql_field->real_field_type() == MYSQL_TYPE_VARCHAR)
@@ -4087,12 +4175,41 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
     if (key->type == Key::UNIQUE && !(key_info->flags & HA_NULL_PART_KEY))
       unique_key=1;
     key_info->key_length=(uint16) key_length;
-    if (key_length > max_key_length && key->type != Key::FULLTEXT)
+    if (key_length > max_key_length && key->type != Key::FULLTEXT &&
+        !is_hash_field_needed)
     {
       my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length);
       DBUG_RETURN(TRUE);
     }
+
+    if (is_hash_field_needed &&
+        key_info->algorithm != HA_KEY_ALG_UNDEF &&
+        key_info->algorithm != HA_KEY_ALG_HASH )
+    {
+      my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
+      DBUG_RETURN(TRUE);
+    }
+    if (is_hash_field_needed ||
+        (key_info->algorithm == HA_KEY_ALG_HASH &&
+         key_info->flags & HA_NOSAME &&
+         !(file->ha_table_flags() & HA_CAN_HASH_KEYS ) &&
+         file->ha_table_flags() & HA_CAN_VIRTUAL_COLUMNS))
+    {
+      Create_field *hash_fld= add_hash_field(thd, &alter_info->create_list,
+                                             key_info);
+      if (!hash_fld)
+        DBUG_RETURN(TRUE);
+      hash_fld->offset= record_offset;
+      hash_fld->charset= create_info->default_table_charset;
+      record_offset+= hash_fld->pack_length;
+      if (key_info->flags & HA_NULL_PART_KEY)
+        null_fields++;
+      else
+      {
+        hash_fld->flags|= NOT_NULL_FLAG;
+        hash_fld->pack_flag&= ~FIELDFLAG_MAYBE_NULL;
+      }
+    }
     if (validate_comment_length(thd, &key->key_create_info.comment,
                                 INDEX_COMMENT_MAXLEN,
                                 ER_TOO_LONG_INDEX_COMMENT,
|
|||
|
||||
// Check if a duplicate index is defined.
|
||||
check_duplicate_key(thd, key, key_info, &alter_info->key_list);
|
||||
|
||||
key_info++;
|
||||
}
|
||||
|
||||
|
@@ -8193,11 +8309,12 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
     Collect all keys which isn't in drop list. Add only those
     for which some fields exists.
   */
-
   for (uint i=0 ; i < table->s->keys ; i++,key_info++)
   {
     if (key_info->flags & HA_INVISIBLE_KEY)
       continue;
+    if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+      setup_keyinfo_hash(key_info);
     const char *key_name= key_info->name.str;
     Alter_drop *drop;
     drop_it.rewind();
|
|||
if (cfield->field) // Not new field
|
||||
{
|
||||
/*
|
||||
if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
|
||||
{
|
||||
Field *fld= cfield->field;
|
||||
if (fld->max_display_length() == cfield->length*fld->charset()->mbmaxlen
|
||||
&& fld->max_data_length() != key_part->length)
|
||||
cfield->length= cfield->char_length= key_part->length;
|
||||
}
|
||||
If the field can't have only a part used in a key according to its
|
||||
new type, or should not be used partially according to its
|
||||
previous type, or the field length is less than the key part
|
||||
|
@ -8321,6 +8445,11 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
|
|||
enum Key::Keytype key_type;
|
||||
LEX_CSTRING tmp_name;
|
||||
bzero((char*) &key_create_info, sizeof(key_create_info));
|
||||
if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
|
||||
{
|
||||
key_info->flags|= HA_NOSAME;
|
||||
key_info->algorithm= HA_KEY_ALG_UNDEF;
|
||||
}
|
||||
key_create_info.algorithm= key_info->algorithm;
|
||||
/*
|
||||
We copy block size directly as some engines, like Area, sets this
|
||||
|
@@ -8360,6 +8489,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
 
       tmp_name.str= key_name;
      tmp_name.length= strlen(key_name);
+      /* We dont need LONG_UNIQUE_HASH_FIELD flag because it will be autogenerated */
       key= new Key(key_type, &tmp_name, &key_create_info,
                    MY_TEST(key_info->flags & HA_GENERATED_KEY),
                    &key_parts, key_info->option_list, DDL_options());
@@ -1137,6 +1137,8 @@ update_begin:
       break;
     }
   }
+  if (table->update_handler)
+    table->delete_update_handler();
   ANALYZE_STOP_TRACKING(&explain->command_tracker);
   table->auto_increment_field_not_null= FALSE;
   dup_key_found= 0;
@@ -2369,6 +2371,8 @@ multi_update::~multi_update()
   for (table= update_tables ; table; table= table->next_local)
   {
     table->table->no_keyread= 0;
+    if (table->table->update_handler)
+      table->table->delete_update_handler();
     if (ignore)
       table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
   }
283
sql/table.cc
@@ -822,6 +822,12 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
       }
       key_part->store_length=key_part->length;
     }
+    if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+    {
+      keyinfo->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+      //Storing key hash
+      key_part++;
+    }
 
     /*
       Add primary key to end of extended keys for non unique keys for
@@ -855,7 +861,9 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
       if (j == first_key_parts)
         keyinfo->ext_key_flags= keyinfo->flags | HA_EXT_NOSAME;
     }
+    if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+      share->ext_key_parts++;
     share->ext_key_parts+= keyinfo->ext_key_parts;
   }
   keynames=(char*) key_part;
   strpos+= strnmov(keynames, (char *) strpos, frm_image_end - strpos) - keynames;
@@ -1167,10 +1175,57 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
     pos+= expr_length;
   }
 
-  /* Now, initialize CURRENT_TIMESTAMP fields */
+  /* Now, initialize CURRENT_TIMESTAMP and UNIQUE_INDEX_HASH_FIELD fields */
   for (field_ptr= table->field; *field_ptr; field_ptr++)
   {
     Field *field= *field_ptr;
+    if (field->flags & LONG_UNIQUE_HASH_FIELD)
+    {
+      List<Item> *field_list= new (mem_root) List<Item>();
+      Item *list_item;
+      KEY *key;
+      uint key_index, parts;
+      for (key_index= 0; key_index < table->s->keys; key_index++)
+      {
+        key=table->key_info + key_index;
+        parts= key->user_defined_key_parts;
+        if (key->algorithm == HA_KEY_ALG_LONG_HASH &&
+            key->key_part[key->user_defined_key_parts].fieldnr == field->field_index+ 1)
+          break;
+      }
+      KEY_PART_INFO *keypart;
+      for (uint i=0; i < parts; i++)
+      {
+        keypart= key->key_part + i;
+        if (!keypart->length)
+        {
+          list_item= new(mem_root)Item_field(thd, keypart->field);
+        }
+        else
+        {
+          int length= keypart->length/keypart->field->charset()->mbmaxlen;
+          list_item= new(mem_root)Item_func_left(thd,
+                       new (mem_root)Item_field(thd, keypart->field),
+                       new (mem_root) Item_int(thd, length));
+          list_item->fix_fields(thd, NULL);
+          // keypart->key_part_flag |= HA_PART_KEY_SEG;
+        }
+        field_list->push_back(list_item, mem_root);
+      }
+      Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list);
+      Virtual_column_info *v= new (mem_root) Virtual_column_info();
+      field->vcol_info= v;
+      field->vcol_info->expr= hash_item;
+      key->user_defined_key_parts= key->ext_key_parts= key->usable_key_parts= 1;
+      key->key_part+= parts;
+
+      if (key->flags & HA_NULL_PART_KEY)
+        key->key_length= HA_HASH_KEY_LENGTH_WITH_NULL;
+      else
+        key->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+
+      *(vfield_ptr++)= *field_ptr;
+    }
     if (field->has_default_now_unireg_check())
     {
       expr_str.length(parse_vcol_keyword.length);
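When the user gave an explicit prefix length, the generated expression above wraps the column in Item_func_left, i.e. conceptually hash(left(col, length)); two values that agree on the prefix therefore collide even if they differ later. Sketch (values are mine):

  create table t1 (a text, unique(a(2100)));
  insert into t1 values (concat(repeat('x',2100), 'tail-1'));
  # differs only after the 2100-character prefix:
  --error ER_DUP_ENTRY
  insert into t1 values (concat(repeat('x',2100), 'tail-2'));
  drop table t1;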
@@ -1493,7 +1548,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
   uint interval_count, interval_parts, read_length, int_length;
   uint db_create_options, keys, key_parts, n_length;
   uint com_length, null_bit_pos, UNINIT_VAR(mysql57_vcol_null_bit_pos), bitmap_count;
-  uint i;
+  uint i, hash_fields= 0;
   bool use_hash, mysql57_null_bits= 0;
   char *keynames, *names, *comment_pos;
   const uchar *forminfo;
@@ -1863,8 +1918,11 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
                          share, len, &first_keyinfo, keynames))
       goto err;
   }
-
   share->key_block_size= uint2korr(frm_image+62);
+  keyinfo= share->key_info;
+  for (uint i= 0; i < share->keys; i++, keyinfo++)
+    if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+      hash_fields++;
 
   if (share->db_plugin && !plugin_equals(share->db_plugin, se_plugin))
     goto err; // wrong engine (someone changed the frm under our feet?)
@@ -2371,6 +2429,41 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
   /* Fix key->name and key_part->field */
   if (key_parts)
   {
     keyinfo= share->key_info;
+    uint hash_field_used_no= share->fields - hash_fields;
+    KEY_PART_INFO *hash_keypart;
+    Field *hash_field;
+    uint offset= share->reclength - HA_HASH_FIELD_LENGTH * hash_fields;
+    for (uint i= 0; i < share->keys; i++, keyinfo++)
+    {
+      /*
+        We need to set the values in the hash key_part.
+      */
+      if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+      {
+        share->long_unique_table= 1;
+        if (share->frm_version < FRM_VER_EXPRESSSIONS)
+          share->frm_version= FRM_VER_EXPRESSSIONS;
+        hash_keypart= keyinfo->key_part + keyinfo->user_defined_key_parts;
+        hash_keypart->length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+        hash_keypart->store_length= hash_keypart->length;
+        hash_keypart->type= HA_KEYTYPE_ULONGLONG;
+        hash_keypart->key_part_flag= 0;
+        hash_keypart->key_type= 32834;
+        /* The last hash_fields fields of the record are the unique_index_hash fields */
+        hash_keypart->offset= offset;
+        // hash_keypart->offset= share->reclength
+        //   - HA_HASH_FIELD_LENGTH * (share->fields - hash_field_used_no);
+        hash_keypart->fieldnr= hash_field_used_no + 1;
+        hash_field= share->field[hash_field_used_no];
+        hash_field->flags|= LONG_UNIQUE_HASH_FIELD; // used in parse_vcol_defs
+        keyinfo->flags|= HA_NOSAME;
+        share->virtual_fields++;
+        hash_field_used_no++;
+        offset+= HA_HASH_FIELD_LENGTH;
+      }
+    }
+    uint add_first_key_parts= 0;
     longlong ha_option= handler_file->ha_table_flags();
     keyinfo= share->key_info;
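The offset bookkeeping above relies on the hidden hash fields being laid out at the tail of the record buffer, one HA_HASH_FIELD_LENGTH (8-byte) slot per long unique key. A self-contained sketch of that arithmetic follows; the record length and key count are assumed values, not taken from a real frm.

// Tail-of-record layout for the hidden hash fields.
#include <cstdio>

enum { HA_HASH_FIELD_LENGTH = 8 };

int main()
{
  unsigned reclength= 292;    // assumed total record length
  unsigned hash_fields= 2;    // two long unique keys -> two hash fields
  unsigned offset= reclength - HA_HASH_FIELD_LENGTH * hash_fields;
  for (unsigned i= 0; i < hash_fields; i++)
  {
    printf("hash field %u at offset %u\n", i, offset);  // 276, then 284
    offset+= HA_HASH_FIELD_LENGTH;
  }
  return 0;
}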
@@ -2378,7 +2471,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
                          primary_key_name) ? MAX_KEY : 0;
     KEY* key_first_info= NULL;

-    if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME)
+    if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME &&
+        keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
     {
       /*
         If the UNIQUE key doesn't have NULL columns and is not a part key
@@ -2477,9 +2571,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         if (field->null_ptr)
           len_null_byte= HA_KEY_NULL_LENGTH;

-        if (field->type() == MYSQL_TYPE_BLOB ||
+        if ((field->type() == MYSQL_TYPE_BLOB ||
             field->real_type() == MYSQL_TYPE_VARCHAR ||
-            field->type() == MYSQL_TYPE_GEOMETRY)
+            field->type() == MYSQL_TYPE_GEOMETRY) &&
+            keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
         {
           length_bytes= HA_KEY_BLOB_LENGTH;
         }
@@ -2529,7 +2624,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         }
         share->ext_key_parts+= keyinfo->ext_key_parts;
       }
-      if (new_key_part != keyinfo->key_part)
+      if (new_key_part != keyinfo->key_part &&
+          /* Don't override the hash key_part */
+          !(new_key_part->field &&
+            new_key_part->field->flags & LONG_UNIQUE_HASH_FIELD))
       {
         memmove(new_key_part, keyinfo->key_part,
                 sizeof(KEY_PART_INFO) * keyinfo->ext_key_parts);
@@ -2544,6 +2642,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       key_part= keyinfo->key_part;
       uint key_parts= share->use_ext_keys ? keyinfo->ext_key_parts :
                                             keyinfo->user_defined_key_parts;
+      if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+        key_parts++;
       for (i=0; i < key_parts; key_part++, i++)
       {
         Field *field;
@@ -2557,8 +2657,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         field= key_part->field= share->field[key_part->fieldnr-1];
         key_part->type= field->key_type();

         if (field->invisible > INVISIBLE_USER && !field->vers_sys_field())
-          keyinfo->flags |= HA_INVISIBLE_KEY;
+          if (keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
+            keyinfo->flags |= HA_INVISIBLE_KEY;
         if (field->null_ptr)
         {
           key_part->null_offset=(uint) ((uchar*) field->null_ptr -
@@ -2584,7 +2686,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
           key_part->key_part_flag|= HA_BIT_PART;

         if (i == 0 && key != primary_key)
-          field->flags |= (((keyinfo->flags & HA_NOSAME) &&
+          field->flags |= (((keyinfo->flags & HA_NOSAME ||
+                             keyinfo->algorithm == HA_KEY_ALG_LONG_HASH) &&
                             (keyinfo->user_defined_key_parts == 1)) ?
                            UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG);
         if (i == 0)
@@ -2622,7 +2725,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
             field->part_of_sortkey= share->keys_in_use;
           }
         }
-        if (field->key_length() != key_part->length)
+        if (field->key_length() != key_part->length &&
+            keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
         {
 #ifndef TO_BE_DELETED_ON_PRODUCTION
           if (field->type() == MYSQL_TYPE_NEWDECIMAL)
@@ -2664,7 +2768,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         if (!(key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART |
                                          HA_BIT_PART)) &&
             key_part->type != HA_KEYTYPE_FLOAT &&
-            key_part->type != HA_KEYTYPE_DOUBLE)
+            key_part->type != HA_KEYTYPE_DOUBLE &&
+            keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
           key_part->key_part_flag|= HA_CAN_MEMCMP;
       }
       keyinfo->usable_key_parts= usable_parts; // Filesort
@@ -3332,6 +3437,84 @@ static bool check_vcol_forward_refs(Field *field, Virtual_column_info *vcol,
   return res;
 }

+static void print_long_unique_table(TABLE *table)
+{
+  char buff[256];
+  String str;
+  KEY *key_info_table, *key_info_share;
+  KEY_PART_INFO *key_part;
+  Field *field;
+  my_snprintf(buff, sizeof(buff), "Printing table state: fields, field->offset,"
+              " field->null_bit, field->null_pos and key_info ...\n"
+              "\nPrinting table keyinfo\n");
+  str.append(buff, strlen(buff));
+  my_snprintf(buff, sizeof(buff), "\ntable->s->reclength %d\n"
+              "table->s->fields %d\n",
+              table->s->reclength, table->s->fields);
+  str.append(buff, strlen(buff));
+  for (uint i= 0; i < table->s->keys; i++)
+  {
+    key_info_table= table->key_info + i;
+    key_info_share= table->s->key_info + i;
+    my_snprintf(buff, sizeof(buff), "\ntable->key_info[%d] user_defined_key_parts = %d\n"
+                "table->key_info[%d] algorithm == HA_KEY_ALG_LONG_HASH = %d\n"
+                "table->key_info[%d] flags & HA_NOSAME = %d\n",
+                i, key_info_table->user_defined_key_parts,
+                i, key_info_table->algorithm == HA_KEY_ALG_LONG_HASH,
+                i, key_info_table->flags & HA_NOSAME);
+    str.append(buff, strlen(buff));
+    my_snprintf(buff, sizeof(buff), "\ntable->s->key_info[%d] user_defined_key_parts = %d\n"
+                "table->s->key_info[%d] algorithm == HA_KEY_ALG_LONG_HASH = %d\n"
+                "table->s->key_info[%d] flags & HA_NOSAME = %d\n",
+                i, key_info_share->user_defined_key_parts,
+                i, key_info_share->algorithm == HA_KEY_ALG_LONG_HASH,
+                i, key_info_share->flags & HA_NOSAME);
+    str.append(buff, strlen(buff));
+    key_part= key_info_table->key_part;
+    my_snprintf(buff, sizeof(buff), "\nPrinting table->key_info[%d].key_part[0] info\n"
+                "key_part->offset = %d\n"
+                "key_part->field_name = %s\n"
+                "key_part->length = %d\n"
+                "key_part->null_bit = %d\n"
+                "key_part->null_offset = %d\n",
+                i, key_part->offset, key_part->field->field_name.str, key_part->length,
+                key_part->null_bit, key_part->null_offset);
+    str.append(buff, strlen(buff));
+
+    for (uint j= 0; j < key_info_share->user_defined_key_parts; j++)
+    {
+      key_part= key_info_share->key_part + j;
+      my_snprintf(buff, sizeof(buff), "\nPrinting share->key_info[%d].key_part[%d] info\n"
+                  "key_part->offset = %d\n"
+                  "key_part->field_name = %s\n"
+                  "key_part->length = %d\n"
+                  "key_part->null_bit = %d\n"
+                  "key_part->null_offset = %d\n",
+                  i, j, key_part->offset, key_part->field->field_name.str, key_part->length,
+                  key_part->null_bit, key_part->null_offset);
+      str.append(buff, strlen(buff));
+    }
+  }
+  my_snprintf(buff, sizeof(buff), "\nPrinting table->fields\n");
+  str.append(buff, strlen(buff));
+  for (uint i= 0; i < table->s->fields; i++)
+  {
+    field= table->field[i];
+    my_snprintf(buff, sizeof(buff), "\ntable->field[%d]->field_name %s\n"
+                "table->field[%d]->offset = %d\n"
+                "table->field[%d]->field_length = %d\n"
+                "table->field[%d]->null_pos wrt to record 0 = %d\n"
+                "table->field[%d]->null_bit_pos = %d\n",
+                i, field->field_name.str,
+                i, field->ptr - table->record[0],
+                i, field->pack_length(),
+                i, field->null_bit ? field->null_ptr - table->record[0] : -1,
+                i, field->null_bit);
+    str.append(buff, strlen(buff));
+  }
+  (*error_handler_hook)(1, str.ptr(), ME_NOTE);
+}
+
 /*
   Open a table based on a TABLE_SHARE
|
||||
/*
|
||||
Open a table based on a TABLE_SHARE
|
||||
|
||||
|
@ -3525,6 +3708,11 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
|
|||
|
||||
key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
|
||||
key_info->user_defined_key_parts) ;
|
||||
if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
|
||||
{
|
||||
key_part_end++;
|
||||
key_info->flags&= ~HA_NOSAME;
|
||||
}
|
||||
for ( ; key_part < key_part_end; key_part++)
|
||||
{
|
||||
Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
|
||||
|
@@ -3773,6 +3961,8 @@ partititon_err:
   thd->status_var.opened_tables++;

   thd->lex->context_analysis_only= save_context_analysis_only;
+  DBUG_EXECUTE_IF("print_long_unique_internal_state",
+                  print_long_unique_table(outparam););
   DBUG_RETURN (OPEN_FRM_OK);

 err:
@@ -4252,7 +4442,7 @@ uint calculate_key_len(TABLE *table, uint key, const uchar *buf,
   /* works only with key prefixes */
   DBUG_ASSERT(((keypart_map + 1) & keypart_map) == 0);

-  KEY *key_info= table->s->key_info+key;
+  KEY *key_info= table->key_info+key;
   KEY_PART_INFO *key_part= key_info->key_part;
   KEY_PART_INFO *end_key_part= key_part + table->actual_n_key_parts(key_info);
   uint length= 0;
@@ -4862,6 +5052,8 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
   range_rowid_filter_cost_info_elems= 0;
   range_rowid_filter_cost_info_ptr= NULL;
   range_rowid_filter_cost_info= NULL;
+  update_handler= NULL;
+  check_unique_buf= NULL;
 #ifdef HAVE_REPLICATION
   /* used in RBR Triggers */
   master_had_triggers= 0;
@@ -8776,6 +8968,69 @@ double KEY::actual_rec_per_key(uint i)
            read_stats->get_avg_frequency(i) : (double) rec_per_key[i]);
 }

+/*
+  Find the total number of fields in the hash expression.
+*/
+int fields_in_hash_keyinfo(KEY *keyinfo)
+{
+  Item_func_hash *temp= (Item_func_hash *)
+                          keyinfo->key_part->field->vcol_info->expr;
+  return temp->argument_count();
+}
+
+/*
+  setup_keyinfo_hash changes key_info->key_part to point at the
+  key parts as they were defined by the user.
+*/
+void setup_keyinfo_hash(KEY *key_info)
+{
+  DBUG_ASSERT(key_info->algorithm == HA_KEY_ALG_LONG_HASH);
+  DBUG_ASSERT(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD);
+  uint no_of_keyparts= fields_in_hash_keyinfo(key_info);
+  key_info->key_part-= no_of_keyparts;
+  key_info->user_defined_key_parts= key_info->usable_key_parts=
+    key_info->ext_key_parts= no_of_keyparts;
+}
+
+/*
+  re_setup_keyinfo_hash reverts setup_keyinfo_hash; this arrangement
+  (a single key part pointing at the hash field) is what the storage
+  engine expects.
+*/
+void re_setup_keyinfo_hash(KEY *key_info)
+{
+  DBUG_ASSERT(key_info->algorithm == HA_KEY_ALG_LONG_HASH);
+  DBUG_ASSERT(!(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD));
+  while (!(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD))
+    key_info->key_part++;
+  key_info->user_defined_key_parts= key_info->usable_key_parts=
+    key_info->ext_key_parts= 1;
+}
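Both helpers above work purely by pointer arithmetic over the single key_part array described in the commit message. The sketch below is a simplified, self-contained model of the flip between the user representation and the storage-engine representation; Key and Part are stand-ins for KEY and KEY_PART_INFO, not the server's types.

// [a] [b] [hash]: the engine view points at the hash part, the user view
// points at the first user-defined part.
#include <cassert>
#include <cstdio>

struct Part { const char *name; bool is_hash; };
struct Key  { Part *key_part; unsigned parts; };

int main()
{
  static Part parts[]= { {"a", false}, {"b", false}, {"hash", true} };
  Key key= { parts + 2, 1 };              // engine view: [hash], 1 part

  // setup_keyinfo_hash(): step back over the user-defined parts
  key.key_part-= 2;
  key.parts= 2;                           // user view: [a, b], 2 parts
  assert(!key.key_part->is_hash);

  // re_setup_keyinfo_hash(): walk forward until the hash part again
  while (!key.key_part->is_hash)
    key.key_part++;
  key.parts= 1;
  printf("%s, %u part(s)\n", key.key_part->name, key.parts); // hash, 1 part(s)
  return 0;
}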
+/**
+  @brief Clone of the current handler.
+  Creates a clone of the handler, used during UPDATE for
+  unique hash key lookups.
+*/
+void TABLE::clone_handler_for_update()
+{
+  handler *update_handler= NULL;
+  if (!s->long_unique_table)
+    return;
+  update_handler= file->clone(s->normalized_path.str,
+                              in_use->mem_root);
+  update_handler->ha_external_lock(in_use, F_RDLCK);
+  this->update_handler= update_handler;
+}
+
+/**
+  @brief Delete the update handler object.
+*/
+void TABLE::delete_update_handler()
+{
+  update_handler->ha_external_lock(in_use, F_UNLCK);
+  update_handler->ha_close();
+  delete update_handler;
+  this->update_handler= NULL;
+}
+
 LEX_CSTRING *fk_option_name(enum_fk_option opt)
 {
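A rough sketch of the lifecycle these two methods implement: clone the table's handler, read-lock it, use it for hash-key lookups while the main handler keeps its own position, then unlock and destroy it. The Handler type below is a simplified stand-in for illustration, not the server's handler class.

#include <cstdio>
#include <memory>

struct Handler
{
  std::unique_ptr<Handler> clone() { return std::make_unique<Handler>(); }
  void external_lock(bool lock) { printf(lock ? "rdlock\n" : "unlock\n"); }
  void close() { printf("close\n"); }
};

int main()
{
  Handler file;                            // the table's main handler
  auto update_handler= file.clone();       // clone_handler_for_update()
  update_handler->external_lock(true);

  // ... ha_update_row() probes the hash key through update_handler,
  //     while `file` keeps its own scan position ...

  update_handler->external_lock(false);    // delete_update_handler()
  update_handler->close();
  return 0;
}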
sql/table.h (17 changes)
@@ -349,9 +349,18 @@ enum field_visibility_t {
   INVISIBLE_FULL
 };

 #define INVISIBLE_MAX_BITS 3
+#define HA_HASH_FIELD_LENGTH 8
+#define HA_HASH_KEY_LENGTH_WITHOUT_NULL 8
+#define HA_HASH_KEY_LENGTH_WITH_NULL 9
+
+int fields_in_hash_keyinfo(KEY *keyinfo);
+
+void setup_keyinfo_hash(KEY *key_info);
+
+void re_setup_keyinfo_hash(KEY *key_info);

 /**
   Category of table found in the table share.
 */
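How the three constants added above relate, as a tiny sketch: the stored hash is an 8-byte ulonglong, and a key over any nullable column carries one extra null-indicator byte. The nullable flag below is an assumption chosen for illustration.

#include <cstdio>

enum
{
  HA_HASH_FIELD_LENGTH= 8,               // bytes of the stored hash value
  HA_HASH_KEY_LENGTH_WITHOUT_NULL= 8,
  HA_HASH_KEY_LENGTH_WITH_NULL= 9        // hash + 1 null-indicator byte
};

int main()
{
  bool nullable_part= true;              // assumed: unique key over a NULLable blob
  unsigned key_length= nullable_part ? HA_HASH_KEY_LENGTH_WITH_NULL
                                     : HA_HASH_KEY_LENGTH_WITHOUT_NULL;
  printf("hash key length = %u\n", key_length);   // prints 9
  return 0;
}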
@@ -735,6 +744,7 @@ struct TABLE_SHARE
   bool vcols_need_refixing;
   bool has_update_default_function;
   bool can_do_row_logging;              /* 1 if table supports RBR */
+  bool long_unique_table;

   ulong table_map_id;                   /* for row-based replication */
@@ -1120,6 +1130,9 @@ public:
   THD *in_use;                          /* Which thread uses this */

   uchar *record[3];                     /* Pointer to records */
+  /* Record buffer used to resolve hash collisions for long UNIQUE constraints */
+  uchar *check_unique_buf;
+  handler *update_handler;              /* Handler used in case of update */
   uchar *write_row_record;              /* Used as optimisation in
                                            THD::write_row */
   uchar *insert_values;                 /* used by INSERT ... UPDATE */
@@ -1586,6 +1599,8 @@ public:
   void vers_update_fields();
   void vers_update_end();
   void find_constraint_correlated_indexes();
+  void clone_handler_for_update();
+  void delete_update_handler();

 /** Number of additional fields used in versioned tables */
 #define VERSIONING_FIELDS 2
@@ -39,7 +39,7 @@
 /* threshold for safe_alloca */
 #define ALLOCA_THRESHOLD 2048

-static uint pack_keys(uchar *, uint, KEY *, ulong);
+static uint pack_keys(uchar *, uint, KEY *, ulong, uint);
 static bool pack_header(THD *, uchar *, List<Create_field> &, HA_CREATE_INFO *,
                         ulong, handler *);
 static bool pack_vcols(String *, List<Create_field> &, List<Virtual_column_info> *);
@@ -184,6 +184,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table,
                 + extra2_str_size(create_info->period_info.constr->name.length)
                 + 2 * frm_fieldno_size
               : 0;
+  uint e_unique_hash_extra_parts= 0;
   uchar fileinfo[FRM_HEADER_SIZE], forminfo[FRM_FORMINFO_SIZE];
   const partition_info *part_info= IF_PARTITIONING(thd->work_part_info, 0);
   bool error;
@@ -306,6 +307,9 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table,
     extra2_size+= 1 + extra2_str_size(create_fields.elements);
   }

+  for (i= 0; i < keys; i++)
+    if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
+      e_unique_hash_extra_parts++;
   key_buff_length= uint4korr(fileinfo+47);

   frm.length= FRM_HEADER_SIZE;                  // fileinfo;
@@ -396,7 +400,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table,
   pos+= 4;

   DBUG_ASSERT(pos == frm_ptr + uint2korr(fileinfo+6));
-  key_info_length= pack_keys(pos, keys, key_info, data_offset);
+  key_info_length= pack_keys(pos, keys, key_info, data_offset, e_unique_hash_extra_parts);
   if (key_info_length > UINT_MAX16)
   {
     my_printf_error(ER_CANT_CREATE_TABLE,
@@ -558,7 +562,7 @@ err_frm:
 /* Pack keyinfo and keynames to keybuff for save in form-file. */

 static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
-                      ulong data_offset)
+                      ulong data_offset, uint e_unique_hash_extra_parts)
 {
   uint key_parts, length;
   uchar *pos, *keyname_pos;
@@ -620,6 +624,7 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
     }
   }

+  key_parts+= e_unique_hash_extra_parts;
   if (key_count > 127 || key_parts > 127)
   {
     keybuff[0]= (key_count & 0x7f) | 0x80;
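A sketch of the count adjustment pack_keys() now performs: the frm stores the total number of key parts, and every HA_KEY_ALG_LONG_HASH key carries one hidden hash part on top of its user-defined parts. The key layout below is invented for illustration.

#include <cstdio>

int main()
{
  unsigned user_parts[]= {2, 1, 3};      // parts per key as defined by the user
  bool long_hash[]= {true, false, true}; // which keys are long unique hash keys
  unsigned key_parts= 0, extra= 0;       // extra == e_unique_hash_extra_parts
  for (unsigned i= 0; i < 3; i++)
  {
    key_parts+= user_parts[i];
    if (long_hash[i])
      extra++;
  }
  key_parts+= extra;
  printf("packed key_parts = %u\n", key_parts);  // 6 user parts + 2 hidden = 8
  return 0;
}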
@@ -180,7 +180,7 @@ enum extra2_frm_value_type {
 };

 enum extra2_field_flags {
-  VERS_OPTIMIZED_UPDATE= 1 << INVISIBLE_MAX_BITS
+  VERS_OPTIMIZED_UPDATE= 1 << INVISIBLE_MAX_BITS,
 };

 int rea_create_table(THD *thd, LEX_CUSTRING *frm,
@@ -980,7 +980,7 @@ int ha_archive::write_row(uchar *buf)

   if (table->next_number_field && record == table->record[0])
   {
-    KEY *mkey= &table->s->key_info[0]; // We only support one key right now
+    KEY *mkey= &table->key_info[0]; // We only support one key right now
     update_auto_increment();
     temp_auto= table->next_number_field->val_int();

@@ -1098,7 +1098,7 @@ int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
 {
   int rc;
   bool found= 0;
-  KEY *mkey= &table->s->key_info[index];
+  KEY *mkey= &table->key_info[index];
   current_k_offset= mkey->key_part->offset;
   current_key= key;
   current_key_len= key_len;
@@ -1677,10 +1677,7 @@ bool ha_connect::GetIndexOption(KEY *kp, PCSZ opname)
 /****************************************************************************/
 bool ha_connect::IsUnique(uint n)
 {
-  TABLE_SHARE *s= (table) ? table->s : NULL;
-  KEY kp= s->key_info[n];
-
-  return (kp.flags & 1) != 0;
+  return (table->key_info[n].flags & HA_NOSAME) != 0;
 } // end of IsUnique

 /****************************************************************************/
@@ -51,7 +51,7 @@ public:
            HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
            HA_CAN_SQL_HANDLER | HA_CAN_ONLINE_BACKUPS |
            HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS |
-           HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT);
+           HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_HASH_KEYS);
   }
   ulong index_flags(uint inx, uint part, bool all_parts) const
   {
@@ -2856,6 +2856,7 @@ ulonglong ha_mroonga::wrapper_table_flags() const
 #ifdef HA_CAN_VIRTUAL_COLUMNS
   table_flags |= HA_CAN_VIRTUAL_COLUMNS;
 #endif
+  table_flags |= HA_CAN_HASH_KEYS;
   DBUG_RETURN(table_flags);
 }

@@ -2891,6 +2892,7 @@ ulonglong ha_mroonga::storage_table_flags() const
 #ifdef HA_CAN_VIRTUAL_COLUMNS
   flags |= HA_CAN_VIRTUAL_COLUMNS;
 #endif
+  flags |= HA_CAN_HASH_KEYS;
   DBUG_RETURN(flags);
 }
@@ -109,7 +109,7 @@ const std::string& oqgraph3::cursor::record_position() const
   if (_graph->_cursor->_index >= 0)
   {
     key_copy((uchar*) _graph->_cursor->_key.data(), table.record[0],
-        table.s->key_info + _index, table.s->key_info[_index].key_length, true);
+        table.key_info + _index, table.key_info[_index].key_length, true);
   }

   _graph->_stale= false;

@@ -184,7 +184,7 @@ int oqgraph3::cursor::restore_position()
   if (int rc= table.file->ha_index_read_map(
           table.record[0], (const uchar*) _key.data(),
           (key_part_map)(1 << _parts) - 1,
-          table.s->key_info[_index].user_defined_key_parts == _parts ?
+          table.key_info[_index].user_defined_key_parts == _parts ?
               HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT))
   {
     table.file->ha_index_end();

@@ -368,7 +368,7 @@ int oqgraph3::cursor::seek_to(
   if (!destid)
   {
     int i= 0;
-    for( ::KEY *key_info= table.s->key_info,
+    for( ::KEY *key_info= table.key_info,
         *key_end= key_info + table.s->keys;
         key_info < key_end; ++key_info, ++i)
     {

@@ -399,7 +399,7 @@ int oqgraph3::cursor::seek_to(
   else if (!origid)
   {
     int i= 0;
-    for( ::KEY *key_info= table.s->key_info,
+    for( ::KEY *key_info= table.key_info,
         *key_end= key_info + table.s->keys;
         key_info < key_end; ++key_info, ++i)
     {

@@ -430,7 +430,7 @@ int oqgraph3::cursor::seek_to(
   else
   {
     int i= 0;
-    for( ::KEY *key_info= table.s->key_info,
+    for( ::KEY *key_info= table.key_info,
         *key_end= key_info + table.s->keys;
         key_info < key_end; ++key_info, ++i)
     {

@@ -485,7 +485,7 @@ int oqgraph3::cursor::seek_to(
   if (int rc= table.file->ha_index_read_map(
           table.record[0], (uchar*) _key.data(),
           (key_part_map) ((1U << _parts) - 1),
-          table.s->key_info[_index].user_defined_key_parts == _parts ?
+          table.key_info[_index].user_defined_key_parts == _parts ?
              HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT))
   {
     table.file->ha_index_end();
@@ -9428,7 +9428,7 @@ ulonglong ha_spider::table_flags() const
 const char *ha_spider::index_type(
   uint key_number
 ) {
-  KEY *key_info = &table->s->key_info[key_number];
+  KEY *key_info = &table->key_info[key_number];
   DBUG_ENTER("ha_spider::index_type");
   DBUG_PRINT("info",("spider this=%p", this));
   DBUG_PRINT("info",("spider flags=%ld", key_info->flags));

@@ -11186,9 +11186,9 @@ double ha_spider::read_time(
   if (keyread)
   {
     DBUG_PRINT("info",("spider read_time(keyread) = %.6f",
-      share->read_rate * table->s->key_info[index].key_length *
+      share->read_rate * table->key_info[index].key_length *
       rows / 2 + 2));
-    DBUG_RETURN(share->read_rate * table->s->key_info[index].key_length *
+    DBUG_RETURN(share->read_rate * table->key_info[index].key_length *
       rows / 2 + 2);
   } else {
     DBUG_PRINT("info",("spider read_time = %.6f",
@@ -5145,7 +5145,7 @@ int spider_handlersocket_handler::append_open_handler(
     share->tgt_dbs[spider->conn_link_idx[link_idx]],
     share->tgt_table_names[spider->conn_link_idx[link_idx]],
     spider->active_index < MAX_KEY ?
-      table->s->key_info[spider->active_index].name :
+      table->key_info[spider->active_index].name :
       "0",
     str->c_ptr_safe(),
     &request_key

@@ -3290,10 +3290,10 @@ void spider_db_mbase::set_dup_key_idx(
     key_name_length = spider->share->tgt_pk_names_lengths[all_link_idx];
   } else {
 #ifdef SPIDER_use_LEX_CSTRING_for_KEY_Field_name
-    key_name = table->s->key_info[roop_count].name.str;
-    key_name_length = table->s->key_info[roop_count].name.length;
+    key_name = table->key_info[roop_count].name.str;
+    key_name_length = table->key_info[roop_count].name.length;
 #else
-    key_name = table->s->key_info[roop_count].name;
+    key_name = table->key_info[roop_count].name;
     key_name_length = strlen(key_name);
 #endif
   }
@@ -6934,7 +6934,7 @@ void ha_tokudb::trace_create_table_info(TABLE* form) {
             field->flags);
     }
     for (i = 0; i < form->s->keys; i++) {
-        KEY *key = &form->s->key_info[i];
+        KEY *key = &form->key_info[i];
         TOKUDB_HANDLER_TRACE(
             "key:%d:%s:%d",
             i,

@@ -7062,7 +7062,7 @@ int ha_tokudb::create_secondary_dictionary(
     sprintf(dict_name, "key-%s", key_info->name.str);
     make_name(newname, newname_len, name, dict_name);

-    prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
+    prim_key = (hpk) ? NULL : &form->key_info[primary_key];

     //
     // setup the row descriptor

@@ -7174,7 +7174,7 @@ int ha_tokudb::create_main_dictionary(

     make_name(newname, newname_len, name, "main");

-    prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
+    prim_key = (hpk) ? NULL : &form->key_info[primary_key];

     //
     // setup the row descriptor

@@ -7439,7 +7439,7 @@ int ha_tokudb::create(

         error = write_key_name_to_status(
             status_block,
-            form->s->key_info[i].name.str,
+            form->key_info[i].name.str,
             txn);
         if (error) {
             goto cleanup;

@@ -8169,7 +8169,7 @@ int ha_tokudb::tokudb_add_index(
     for (uint i = 0; i < num_of_keys; i++) {
         for (uint j = 0; j < table_arg->s->keys; j++) {
             if (strcmp(key_info[i].name.str,
-                       table_arg->s->key_info[j].name.str) == 0) {
+                       table_arg->key_info[j].name.str) == 0) {
                 error = HA_ERR_WRONG_COMMAND;
                 goto cleanup;
             }

@@ -1606,7 +1606,7 @@ int ha_tokudb::new_row_descriptor(TABLE* altered_table,
     } else {
         KEY* prim_key =
             hidden_primary_key ? NULL :
-            &altered_table->s->key_info[primary_key];
+            &altered_table->key_info[primary_key];
         if (idx == primary_key) {
             row_descriptor->size = create_main_key_descriptor(
                 (uchar*)row_descriptor->data,

@@ -451,7 +451,7 @@ static bool check_all_update_expressions(

 static bool full_field_in_key(TABLE* table, Field* field) {
     assert_always(table->s->primary_key < table->s->keys);
-    KEY* key = &table->s->key_info[table->s->primary_key];
+    KEY* key = &table->key_info[table->s->primary_key];
     for (uint i = 0; i < key->user_defined_key_parts; i++) {
         KEY_PART_INFO* key_part = &key->key_part[i];
         if (strcmp(field->field_name.str, key_part->field->field_name.str) == 0) {

@@ -517,7 +517,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
     MY_BITMAP pk_fields;
     if (bitmap_init(&pk_fields, NULL, table->s->fields, FALSE)) // 1 -> failure
         return false;
-    KEY *key = &table->s->key_info[table->s->primary_key];
+    KEY *key = &table->key_info[table->s->primary_key];
     for (uint i = 0; i < key->user_defined_key_parts; i++)
         bitmap_set_bit(&pk_fields, key->key_part[i].field->field_index);

@@ -555,7 +555,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
 static bool clustering_keys_exist(TABLE *table) {
     for (uint keynr = 0; keynr < table->s->keys; keynr++) {
         if (keynr != table->s->primary_key &&
-            key_is_clustering(&table->s->key_info[keynr]))
+            key_is_clustering(&table->key_info[keynr]))
             return true;
     }
     return false;
|
|||
a 1
|
||||
hello 1
|
||||
drop table t1;
|
||||
create table t1 (a text, unique (a(21000)));
|
||||
ERROR 42000: Specified key was too long; max key length is 3072 bytes
|
||||
create table t1 (a text, key (a(2100)));
|
||||
show create table t1;
|
||||
Table Create Table
|
||||
|
|
|
@ -133,8 +133,6 @@ select c,count(*) from t1 group by c;
|
|||
select d,count(*) from t1 group by d;
|
||||
drop table t1;
|
||||
|
||||
-- error 1071
|
||||
create table t1 (a text, unique (a(21000))); # should give an error
|
||||
create table t1 (a text, key (a(2100))); # key is auto-truncated
|
||||
replace_regex /ENGINE=[a-zA-Z]*/ENGINE=ENGINE/;
|
||||
show create table t1;
|
||||
|
|